// SPDX-License-Identifier: GPL-2.0
/*
 *  Shared Memory Communications over RDMA (SMC-R) and RoCE
 *
 *  Basic Transport Functions exploiting Infiniband API
 *
 *  Copyright IBM Corp. 2016
 *
 *  Author(s):  Ursula Braun <ubraun@linux.vnet.ibm.com>
 */

#include <linux/socket.h>
#include <linux/if_vlan.h>
#include <linux/random.h>
#include <linux/workqueue.h>
#include <linux/wait.h>
#include <linux/reboot.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/smc.h>
#include <net/tcp.h>
#include <net/sock.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_cache.h>

#include "smc.h"
#include "smc_clc.h"
#include "smc_core.h"
#include "smc_ib.h"
#include "smc_wr.h"
#include "smc_llc.h"
#include "smc_cdc.h"
#include "smc_close.h"
#include "smc_ism.h"
#include "smc_netlink.h"
#include "smc_stats.h"

#define SMC_LGR_NUM_INCR		256
#define SMC_LGR_FREE_DELAY_SERV		(600 * HZ)
#define SMC_LGR_FREE_DELAY_CLNT		(SMC_LGR_FREE_DELAY_SERV + 10 * HZ)

struct smc_lgr_list smc_lgr_list = {	/* established link groups */
	.lock = __SPIN_LOCK_UNLOCKED(smc_lgr_list.lock),
	.list = LIST_HEAD_INIT(smc_lgr_list.list),
	.num = 0,
};

static atomic_t lgr_cnt = ATOMIC_INIT(0); /* number of existing link groups */
static DECLARE_WAIT_QUEUE_HEAD(lgrs_deleted);

static void smc_buf_free(struct smc_link_group *lgr, bool is_rmb,
			 struct smc_buf_desc *buf_desc);
static void __smc_lgr_terminate(struct smc_link_group *lgr, bool soft);

static void smc_link_down_work(struct work_struct *work);

/* return head of link group list and its lock for a given link group */
static inline struct list_head *smc_lgr_list_head(struct smc_link_group *lgr,
						  spinlock_t **lgr_lock)
{
	if (lgr->is_smcd) {
		*lgr_lock = &lgr->smcd->lgr_lock;
		return &lgr->smcd->lgr_list;
	}

	*lgr_lock = &smc_lgr_list.lock;
	return &smc_lgr_list.list;
}

static void smc_ibdev_cnt_inc(struct smc_link *lnk)
{
	atomic_inc(&lnk->smcibdev->lnk_cnt_by_port[lnk->ibport - 1]);
}

static void smc_ibdev_cnt_dec(struct smc_link *lnk)
{
	atomic_dec(&lnk->smcibdev->lnk_cnt_by_port[lnk->ibport - 1]);
}

static void smc_lgr_schedule_free_work(struct smc_link_group *lgr)
{
	/* client link group creation always follows the server link group
	 * creation. For client use a somewhat higher removal delay time,
	 * otherwise there is a risk of out-of-sync link groups.
	 */
	if (!lgr->freeing) {
		mod_delayed_work(system_wq, &lgr->free_work,
				 (!lgr->is_smcd && lgr->role == SMC_CLNT) ?
						SMC_LGR_FREE_DELAY_CLNT :
						SMC_LGR_FREE_DELAY_SERV);
	}
}

/* Register connection's alert token in our lookup structure.
 * To use rbtrees we have to implement our own insert core.
 * Requires @conns_lock
 * @conn	connection to register
 */
static void smc_lgr_add_alert_token(struct smc_connection *conn)
{
	struct rb_node **link, *parent = NULL;
	u32 token = conn->alert_token_local;

	link = &conn->lgr->conns_all.rb_node;
	while (*link) {
		struct smc_connection *cur = rb_entry(*link,
					struct smc_connection, alert_node);

		parent = *link;
		if (cur->alert_token_local > token)
			link = &parent->rb_left;
		else
			link = &parent->rb_right;
	}
	/* Put the new node there */
	rb_link_node(&conn->alert_node, parent, link);
	rb_insert_color(&conn->alert_node, &conn->lgr->conns_all);
}

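/* Editorial sketch: the insert above keeps the tree ordered by
 * alert_token_local, so the lookup counterpart is a plain binary search.
 * The real helper, smc_lgr_find_conn(), lives elsewhere in this file and
 * may differ in detail; this illustrative version only shows the search
 * the comment above refers to:
 *
 *	static struct smc_connection *find_conn_sketch(u32 token,
 *						struct smc_link_group *lgr)
 *	{
 *		struct rb_node *node = lgr->conns_all.rb_node;
 *
 *		while (node) {
 *			struct smc_connection *cur = rb_entry(node,
 *					struct smc_connection, alert_node);
 *
 *			if (cur->alert_token_local > token)
 *				node = node->rb_left;
 *			else if (cur->alert_token_local < token)
 *				node = node->rb_right;
 *			else
 *				return cur;	// token found
 *		}
 *		return NULL;	// token not registered in this lgr
 *	}
 */
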
/* assign an SMC-R link to the connection */
static int smcr_lgr_conn_assign_link(struct smc_connection *conn, bool first)
{
	enum smc_link_state expected = first ? SMC_LNK_ACTIVATING :
				       SMC_LNK_ACTIVE;
	int i, j;

	/* do link balancing */
	for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
		struct smc_link *lnk = &conn->lgr->lnk[i];

		if (lnk->state != expected || lnk->link_is_asym)
			continue;
		if (conn->lgr->role == SMC_CLNT) {
			conn->lnk = lnk; /* temporary, SMC server assigns link*/
			break;
		}
		if (conn->lgr->conns_num % 2) {
			for (j = i + 1; j < SMC_LINKS_PER_LGR_MAX; j++) {
				struct smc_link *lnk2;

				lnk2 = &conn->lgr->lnk[j];
				if (lnk2->state == expected &&
				    !lnk2->link_is_asym) {
					conn->lnk = lnk2;
					break;
				}
			}
		}
		if (!conn->lnk)
			conn->lnk = lnk;
		break;
	}
	if (!conn->lnk)
		return SMC_CLC_DECL_NOACTLINK;
	atomic_inc(&conn->lnk->conn_cnt);
	return 0;
}

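/* Illustration of the balancing rule above (editorial, not original code):
 * with links 0 and 1 both in the expected state and symmetric, the server
 * side alternates assignments based on the current connection count:
 *
 *	conns_num even -> the first usable link (index i) is kept;
 *	conns_num odd  -> the inner search continues at i + 1, so the next
 *			  usable link is picked instead.
 *
 * The client side never balances here; it takes the first usable link
 * only as a placeholder, because the server assigns the final link.
 */
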
/* Register connection in link group by assigning an alert token
 * registered in a search tree.
 * Requires @conns_lock
 * Note that '0' is a reserved value and not assigned.
 */
static int smc_lgr_register_conn(struct smc_connection *conn, bool first)
{
	struct smc_sock *smc = container_of(conn, struct smc_sock, conn);
	static atomic_t nexttoken = ATOMIC_INIT(0);
	int rc;

	if (!conn->lgr->is_smcd) {
		rc = smcr_lgr_conn_assign_link(conn, first);
		if (rc)
			return rc;
	}
	/* find a new alert_token_local value not yet used by some connection
	 * in this link group
	 */
	sock_hold(&smc->sk); /* sock_put in smc_lgr_unregister_conn() */
	while (!conn->alert_token_local) {
		conn->alert_token_local = atomic_inc_return(&nexttoken);
		if (smc_lgr_find_conn(conn->alert_token_local, conn->lgr))
			conn->alert_token_local = 0;
	}
	smc_lgr_add_alert_token(conn);
	conn->lgr->conns_num++;
	return 0;
}

/* Unregister connection and reset the alert token of the given connection
 */
static void __smc_lgr_unregister_conn(struct smc_connection *conn)
{
	struct smc_sock *smc = container_of(conn, struct smc_sock, conn);
	struct smc_link_group *lgr = conn->lgr;

	rb_erase(&conn->alert_node, &lgr->conns_all);
	if (!lgr->is_smcd)
		atomic_dec(&conn->lnk->conn_cnt);
	lgr->conns_num--;
	conn->alert_token_local = 0;
	sock_put(&smc->sk); /* sock_hold in smc_lgr_register_conn() */
}

/* Unregister connection from lgr
 */
static void smc_lgr_unregister_conn(struct smc_connection *conn)
{
	struct smc_link_group *lgr = conn->lgr;

	if (!lgr)
		return;
	write_lock_bh(&lgr->conns_lock);
	if (conn->alert_token_local) {
		__smc_lgr_unregister_conn(conn);
	}
	write_unlock_bh(&lgr->conns_lock);
	conn->lgr = NULL;
}

int smc_nl_get_sys_info(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct smc_nl_dmp_ctx *cb_ctx = smc_nl_dmp_ctx(cb);
	char hostname[SMC_MAX_HOSTNAME_LEN + 1];
	char smc_seid[SMC_MAX_EID_LEN + 1];
	struct smcd_dev *smcd_dev;
	struct nlattr *attrs;
	u8 *seid = NULL;
	u8 *host = NULL;
	void *nlh;

	nlh = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
			  &smc_gen_nl_family, NLM_F_MULTI,
			  SMC_NETLINK_GET_SYS_INFO);
	if (!nlh)
		goto errmsg;
	if (cb_ctx->pos[0])
		goto errout;
	attrs = nla_nest_start(skb, SMC_GEN_SYS_INFO);
	if (!attrs)
		goto errout;
	if (nla_put_u8(skb, SMC_NLA_SYS_VER, SMC_V2))
		goto errattr;
	if (nla_put_u8(skb, SMC_NLA_SYS_REL, SMC_RELEASE))
		goto errattr;
	if (nla_put_u8(skb, SMC_NLA_SYS_IS_ISM_V2, smc_ism_is_v2_capable()))
		goto errattr;
	smc_clc_get_hostname(&host);
	if (host) {
		memcpy(hostname, host, SMC_MAX_HOSTNAME_LEN);
		hostname[SMC_MAX_HOSTNAME_LEN] = 0;
		if (nla_put_string(skb, SMC_NLA_SYS_LOCAL_HOST, hostname))
			goto errattr;
	}
	mutex_lock(&smcd_dev_list.mutex);
	smcd_dev = list_first_entry_or_null(&smcd_dev_list.list,
					    struct smcd_dev, list);
	if (smcd_dev)
		smc_ism_get_system_eid(smcd_dev, &seid);
	mutex_unlock(&smcd_dev_list.mutex);
	if (seid && smc_ism_is_v2_capable()) {
		memcpy(smc_seid, seid, SMC_MAX_EID_LEN);
		smc_seid[SMC_MAX_EID_LEN] = 0;
		if (nla_put_string(skb, SMC_NLA_SYS_SEID, smc_seid))
			goto errattr;
	}
	nla_nest_end(skb, attrs);
	genlmsg_end(skb, nlh);
	cb_ctx->pos[0] = 1;
	return skb->len;

errattr:
	nla_nest_cancel(skb, attrs);
errout:
	genlmsg_cancel(skb, nlh);
errmsg:
	return skb->len;
}

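/* Editorial note on the error-unwind labels used by the netlink dump
 * helpers in this file (illustrative summary of the pattern visible
 * above, not original code): each helper builds its message in nested
 * stages and unwinds exactly the stages it has completed:
 *
 *	nlh = genlmsg_put(...);		// stage 1: message header
 *	attrs = nla_nest_start(...);	// stage 2: attribute nest
 *	nla_put_*(...);			// stage 3: attributes
 *
 *	errattr: nla_nest_cancel(skb, attrs);	// undo stage 2
 *	errout:  genlmsg_cancel(skb, nlh);	// undo stage 1
 *	errmsg:  return ...;			// nothing to undo
 */
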
static int smc_nl_fill_lgr(struct smc_link_group *lgr,
			   struct sk_buff *skb,
			   struct netlink_callback *cb)
{
	char smc_target[SMC_MAX_PNETID_LEN + 1];
	struct nlattr *attrs;

	attrs = nla_nest_start(skb, SMC_GEN_LGR_SMCR);
	if (!attrs)
		goto errout;
	if (nla_put_u32(skb, SMC_NLA_LGR_R_ID, *((u32 *)&lgr->id)))
		goto errattr;
	if (nla_put_u32(skb, SMC_NLA_LGR_R_CONNS_NUM, lgr->conns_num))
		goto errattr;
	if (nla_put_u8(skb, SMC_NLA_LGR_R_ROLE, lgr->role))
		goto errattr;
	if (nla_put_u8(skb, SMC_NLA_LGR_R_TYPE, lgr->type))
		goto errattr;
	if (nla_put_u8(skb, SMC_NLA_LGR_R_VLAN_ID, lgr->vlan_id))
		goto errattr;
	memcpy(smc_target, lgr->pnet_id, SMC_MAX_PNETID_LEN);
	smc_target[SMC_MAX_PNETID_LEN] = 0;
	if (nla_put_string(skb, SMC_NLA_LGR_R_PNETID, smc_target))
		goto errattr;

	nla_nest_end(skb, attrs);
	return 0;
errattr:
	nla_nest_cancel(skb, attrs);
errout:
	return -EMSGSIZE;
}

static int smc_nl_fill_lgr_link(struct smc_link_group *lgr,
				struct smc_link *link,
				struct sk_buff *skb,
				struct netlink_callback *cb)
{
	char smc_ibname[IB_DEVICE_NAME_MAX];
	u8 smc_gid_target[41];
	struct nlattr *attrs;
	u32 link_uid = 0;
	void *nlh;

	nlh = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
			  &smc_gen_nl_family, NLM_F_MULTI,
			  SMC_NETLINK_GET_LINK_SMCR);
	if (!nlh)
		goto errmsg;

	attrs = nla_nest_start(skb, SMC_GEN_LINK_SMCR);
	if (!attrs)
		goto errout;

	if (nla_put_u8(skb, SMC_NLA_LINK_ID, link->link_id))
		goto errattr;
	if (nla_put_u32(skb, SMC_NLA_LINK_STATE, link->state))
		goto errattr;
	if (nla_put_u32(skb, SMC_NLA_LINK_CONN_CNT,
			atomic_read(&link->conn_cnt)))
		goto errattr;
	if (nla_put_u8(skb, SMC_NLA_LINK_IB_PORT, link->ibport))
		goto errattr;
	if (nla_put_u32(skb, SMC_NLA_LINK_NET_DEV, link->ndev_ifidx))
		goto errattr;
	snprintf(smc_ibname, sizeof(smc_ibname), "%s", link->ibname);
	if (nla_put_string(skb, SMC_NLA_LINK_IB_DEV, smc_ibname))
		goto errattr;
	memcpy(&link_uid, link->link_uid, sizeof(link_uid));
	if (nla_put_u32(skb, SMC_NLA_LINK_UID, link_uid))
		goto errattr;
	memcpy(&link_uid, link->peer_link_uid, sizeof(link_uid));
	if (nla_put_u32(skb, SMC_NLA_LINK_PEER_UID, link_uid))
		goto errattr;
	memset(smc_gid_target, 0, sizeof(smc_gid_target));
	smc_gid_be16_convert(smc_gid_target, link->gid);
	if (nla_put_string(skb, SMC_NLA_LINK_GID, smc_gid_target))
		goto errattr;
	memset(smc_gid_target, 0, sizeof(smc_gid_target));
	smc_gid_be16_convert(smc_gid_target, link->peer_gid);
	if (nla_put_string(skb, SMC_NLA_LINK_PEER_GID, smc_gid_target))
		goto errattr;

	nla_nest_end(skb, attrs);
	genlmsg_end(skb, nlh);
	return 0;
errattr:
	nla_nest_cancel(skb, attrs);
errout:
	genlmsg_cancel(skb, nlh);
errmsg:
	return -EMSGSIZE;
}

static int smc_nl_handle_lgr(struct smc_link_group *lgr,
			     struct sk_buff *skb,
			     struct netlink_callback *cb,
			     bool list_links)
{
	void *nlh;
	int i;

	nlh = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
			  &smc_gen_nl_family, NLM_F_MULTI,
			  SMC_NETLINK_GET_LGR_SMCR);
	if (!nlh)
		goto errmsg;
	if (smc_nl_fill_lgr(lgr, skb, cb))
		goto errout;

	genlmsg_end(skb, nlh);
	if (!list_links)
		goto out;
	for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
		if (!smc_link_usable(&lgr->lnk[i]))
			continue;
		if (smc_nl_fill_lgr_link(lgr, &lgr->lnk[i], skb, cb))
			goto errout;
	}
out:
	return 0;
errout:
	genlmsg_cancel(skb, nlh);
errmsg:
	return -EMSGSIZE;
}

static void smc_nl_fill_lgr_list(struct smc_lgr_list *smc_lgr,
				 struct sk_buff *skb,
				 struct netlink_callback *cb,
				 bool list_links)
{
	struct smc_nl_dmp_ctx *cb_ctx = smc_nl_dmp_ctx(cb);
	struct smc_link_group *lgr;
	int snum = cb_ctx->pos[0];
	int num = 0;

	spin_lock_bh(&smc_lgr->lock);
	list_for_each_entry(lgr, &smc_lgr->list, list) {
		if (num < snum)
			goto next;
		if (smc_nl_handle_lgr(lgr, skb, cb, list_links))
			goto errout;
next:
		num++;
	}
errout:
	spin_unlock_bh(&smc_lgr->lock);
	cb_ctx->pos[0] = num;
}

static int smc_nl_fill_smcd_lgr(struct smc_link_group *lgr,
				struct sk_buff *skb,
				struct netlink_callback *cb)
{
	char smc_host[SMC_MAX_HOSTNAME_LEN + 1];
	char smc_pnet[SMC_MAX_PNETID_LEN + 1];
	char smc_eid[SMC_MAX_EID_LEN + 1];
	struct nlattr *v2_attrs;
	struct nlattr *attrs;
	void *nlh;

	nlh = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
			  &smc_gen_nl_family, NLM_F_MULTI,
			  SMC_NETLINK_GET_LGR_SMCD);
	if (!nlh)
		goto errmsg;

	attrs = nla_nest_start(skb, SMC_GEN_LGR_SMCD);
	if (!attrs)
		goto errout;

	if (nla_put_u32(skb, SMC_NLA_LGR_D_ID, *((u32 *)&lgr->id)))
		goto errattr;
	if (nla_put_u64_64bit(skb, SMC_NLA_LGR_D_GID, lgr->smcd->local_gid,
			      SMC_NLA_LGR_D_PAD))
		goto errattr;
	if (nla_put_u64_64bit(skb, SMC_NLA_LGR_D_PEER_GID, lgr->peer_gid,
			      SMC_NLA_LGR_D_PAD))
		goto errattr;
	if (nla_put_u8(skb, SMC_NLA_LGR_D_VLAN_ID, lgr->vlan_id))
		goto errattr;
	if (nla_put_u32(skb, SMC_NLA_LGR_D_CONNS_NUM, lgr->conns_num))
		goto errattr;
	if (nla_put_u32(skb, SMC_NLA_LGR_D_CHID, smc_ism_get_chid(lgr->smcd)))
		goto errattr;
	memcpy(smc_pnet, lgr->smcd->pnetid, SMC_MAX_PNETID_LEN);
	smc_pnet[SMC_MAX_PNETID_LEN] = 0;
	if (nla_put_string(skb, SMC_NLA_LGR_D_PNETID, smc_pnet))
		goto errattr;

	v2_attrs = nla_nest_start(skb, SMC_NLA_LGR_V2);
	if (!v2_attrs)
		goto errattr;
	if (nla_put_u8(skb, SMC_NLA_LGR_V2_VER, lgr->smc_version))
		goto errv2attr;
	if (nla_put_u8(skb, SMC_NLA_LGR_V2_REL, lgr->peer_smc_release))
		goto errv2attr;
	if (nla_put_u8(skb, SMC_NLA_LGR_V2_OS, lgr->peer_os))
		goto errv2attr;
	memcpy(smc_host, lgr->peer_hostname, SMC_MAX_HOSTNAME_LEN);
	smc_host[SMC_MAX_HOSTNAME_LEN] = 0;
	if (nla_put_string(skb, SMC_NLA_LGR_V2_PEER_HOST, smc_host))
		goto errv2attr;
	memcpy(smc_eid, lgr->negotiated_eid, SMC_MAX_EID_LEN);
	smc_eid[SMC_MAX_EID_LEN] = 0;
	if (nla_put_string(skb, SMC_NLA_LGR_V2_NEG_EID, smc_eid))
		goto errv2attr;

	nla_nest_end(skb, v2_attrs);
	nla_nest_end(skb, attrs);
	genlmsg_end(skb, nlh);
	return 0;

errv2attr:
	nla_nest_cancel(skb, v2_attrs);
errattr:
	nla_nest_cancel(skb, attrs);
errout:
	genlmsg_cancel(skb, nlh);
errmsg:
	return -EMSGSIZE;
}

static int smc_nl_handle_smcd_lgr(struct smcd_dev *dev,
				  struct sk_buff *skb,
				  struct netlink_callback *cb)
{
	struct smc_nl_dmp_ctx *cb_ctx = smc_nl_dmp_ctx(cb);
	struct smc_link_group *lgr;
	int snum = cb_ctx->pos[1];
	int rc = 0, num = 0;

	spin_lock_bh(&dev->lgr_lock);
	list_for_each_entry(lgr, &dev->lgr_list, list) {
		if (!lgr->is_smcd)
			continue;
		if (num < snum)
			goto next;
		rc = smc_nl_fill_smcd_lgr(lgr, skb, cb);
		if (rc)
			goto errout;
next:
		num++;
	}
errout:
	spin_unlock_bh(&dev->lgr_lock);
	cb_ctx->pos[1] = num;
	return rc;
}

static int smc_nl_fill_smcd_dev(struct smcd_dev_list *dev_list,
				struct sk_buff *skb,
				struct netlink_callback *cb)
{
	struct smc_nl_dmp_ctx *cb_ctx = smc_nl_dmp_ctx(cb);
	struct smcd_dev *smcd_dev;
	int snum = cb_ctx->pos[0];
	int rc = 0, num = 0;

	mutex_lock(&dev_list->mutex);
	list_for_each_entry(smcd_dev, &dev_list->list, list) {
		if (list_empty(&smcd_dev->lgr_list))
			continue;
		if (num < snum)
			goto next;
		rc = smc_nl_handle_smcd_lgr(smcd_dev, skb, cb);
		if (rc)
			goto errout;
next:
		num++;
	}
errout:
	mutex_unlock(&dev_list->mutex);
	cb_ctx->pos[0] = num;
	return rc;
}

int smcr_nl_get_lgr(struct sk_buff *skb, struct netlink_callback *cb)
{
	bool list_links = false;

	smc_nl_fill_lgr_list(&smc_lgr_list, skb, cb, list_links);
	return skb->len;
}

int smcr_nl_get_link(struct sk_buff *skb, struct netlink_callback *cb)
{
	bool list_links = true;

	smc_nl_fill_lgr_list(&smc_lgr_list, skb, cb, list_links);
	return skb->len;
}

int smcd_nl_get_lgr(struct sk_buff *skb, struct netlink_callback *cb)
{
	smc_nl_fill_smcd_dev(&smcd_dev_list, skb, cb);
	return skb->len;
}

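/* How the dump callbacks above resume across multiple netlink messages
 * (editorial illustration based on the snum/num bookkeeping visible in
 * this file): cb_ctx->pos[] persists between invocations of the same
 * dump. Each pass counts entries in 'num', skips the first 'snum'
 * already-dumped entries, and stores the new count back:
 *
 *	int snum = cb_ctx->pos[0];	// entries dumped so far
 *	int num = 0;
 *	list_for_each_entry(lgr, ...) {
 *		if (num < snum)
 *			goto next;	// sent in an earlier pass
 *		if (fill(lgr, ...))
 *			goto errout;	// skb full, stop; resume later
 *	next:
 *		num++;
 *	}
 *	cb_ctx->pos[0] = num;
 */
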
void smc_lgr_cleanup_early(struct smc_connection *conn)
{
	struct smc_link_group *lgr = conn->lgr;
	struct list_head *lgr_list;
	spinlock_t *lgr_lock;

	if (!lgr)
		return;

	smc_conn_free(conn);
	lgr_list = smc_lgr_list_head(lgr, &lgr_lock);
	spin_lock_bh(lgr_lock);
	/* do not use this link group for new connections */
	if (!list_empty(lgr_list))
		list_del_init(lgr_list);
	spin_unlock_bh(lgr_lock);
	__smc_lgr_terminate(lgr, true);
}

static void smcr_lgr_link_deactivate_all(struct smc_link_group *lgr)
{
	int i;

	for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
		struct smc_link *lnk = &lgr->lnk[i];

		if (smc_link_usable(lnk))
			lnk->state = SMC_LNK_INACTIVE;
	}
	wake_up_all(&lgr->llc_msg_waiter);
	wake_up_all(&lgr->llc_flow_waiter);
}

static void smc_lgr_free(struct smc_link_group *lgr);

static void smc_lgr_free_work(struct work_struct *work)
{
	struct smc_link_group *lgr = container_of(to_delayed_work(work),
						  struct smc_link_group,
						  free_work);
	spinlock_t *lgr_lock;
	bool conns;

	smc_lgr_list_head(lgr, &lgr_lock);
	spin_lock_bh(lgr_lock);
	if (lgr->freeing) {
		spin_unlock_bh(lgr_lock);
		return;
	}
	read_lock_bh(&lgr->conns_lock);
	conns = RB_EMPTY_ROOT(&lgr->conns_all);
	read_unlock_bh(&lgr->conns_lock);
	if (!conns) { /* number of lgr connections is no longer zero */
		spin_unlock_bh(lgr_lock);
		return;
	}
	list_del_init(&lgr->list); /* remove from smc_lgr_list */
	lgr->freeing = 1; /* this instance does the freeing, no new schedule */
	spin_unlock_bh(lgr_lock);
	cancel_delayed_work(&lgr->free_work);

	if (!lgr->is_smcd && !lgr->terminating)
		smc_llc_send_link_delete_all(lgr, true,
					     SMC_LLC_DEL_PROG_INIT_TERM);
	if (lgr->is_smcd && !lgr->terminating)
		smc_ism_signal_shutdown(lgr);
	if (!lgr->is_smcd)
		smcr_lgr_link_deactivate_all(lgr);
	smc_lgr_free(lgr);
}

static void smc_lgr_terminate_work(struct work_struct *work)
{
	struct smc_link_group *lgr = container_of(work, struct smc_link_group,
						  terminate_work);

	__smc_lgr_terminate(lgr, true);
}

/* return next unique link id for the lgr */
static u8 smcr_next_link_id(struct smc_link_group *lgr)
{
	u8 link_id;
	int i;

	while (1) {
again:
		link_id = ++lgr->next_link_id;
		if (!link_id)	/* skip zero as link_id */
			link_id = ++lgr->next_link_id;
		for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
			if (smc_link_usable(&lgr->lnk[i]) &&
			    lgr->lnk[i].link_id == link_id)
				goto again;
		}
		break;
	}
	return link_id;
}

static void smcr_copy_dev_info_to_link(struct smc_link *link)
{
	struct smc_ib_device *smcibdev = link->smcibdev;

	snprintf(link->ibname, sizeof(link->ibname), "%s",
		 smcibdev->ibdev->name);
	link->ndev_ifidx = smcibdev->ndev_ifidx[link->ibport - 1];
}

int smcr_link_init(struct smc_link_group *lgr, struct smc_link *lnk,
		   u8 link_idx, struct smc_init_info *ini)
{
	u8 rndvec[3];
	int rc;

	get_device(&ini->ib_dev->ibdev->dev);
	atomic_inc(&ini->ib_dev->lnk_cnt);
	lnk->link_id = smcr_next_link_id(lgr);
	lnk->lgr = lgr;
	lnk->link_idx = link_idx;
	lnk->smcibdev = ini->ib_dev;
	lnk->ibport = ini->ib_port;
	smc_ibdev_cnt_inc(lnk);
	smcr_copy_dev_info_to_link(lnk);
	lnk->path_mtu = ini->ib_dev->pattr[ini->ib_port - 1].active_mtu;
	atomic_set(&lnk->conn_cnt, 0);
	smc_llc_link_set_uid(lnk);
	INIT_WORK(&lnk->link_down_wrk, smc_link_down_work);
	if (!ini->ib_dev->initialized) {
		rc = (int)smc_ib_setup_per_ibdev(ini->ib_dev);
		if (rc)
			goto out;
	}
	get_random_bytes(rndvec, sizeof(rndvec));
	lnk->psn_initial = rndvec[0] + (rndvec[1] << 8) +
		(rndvec[2] << 16);
	rc = smc_ib_determine_gid(lnk->smcibdev, lnk->ibport,
				  ini->vlan_id, lnk->gid, &lnk->sgid_index);
	if (rc)
		goto out;
	rc = smc_llc_link_init(lnk);
	if (rc)
		goto out;
	rc = smc_wr_alloc_link_mem(lnk);
	if (rc)
		goto clear_llc_lnk;
	rc = smc_ib_create_protection_domain(lnk);
	if (rc)
		goto free_link_mem;
	rc = smc_ib_create_queue_pair(lnk);
	if (rc)
		goto dealloc_pd;
	rc = smc_wr_create_link(lnk);
	if (rc)
		goto destroy_qp;
	lnk->state = SMC_LNK_ACTIVATING;
	return 0;

destroy_qp:
	smc_ib_destroy_queue_pair(lnk);
dealloc_pd:
	smc_ib_dealloc_protection_domain(lnk);
free_link_mem:
	smc_wr_free_link_mem(lnk);
clear_llc_lnk:
	smc_llc_link_clear(lnk, false);
out:
	smc_ibdev_cnt_dec(lnk);
	put_device(&ini->ib_dev->ibdev->dev);
	memset(lnk, 0, sizeof(struct smc_link));
	lnk->state = SMC_LNK_UNUSED;
	if (!atomic_dec_return(&ini->ib_dev->lnk_cnt))
		wake_up(&ini->ib_dev->lnks_deleted);
	return rc;
}

/* create a new SMC link group */
static int smc_lgr_create(struct smc_sock *smc, struct smc_init_info *ini)
{
	struct smc_link_group *lgr;
	struct list_head *lgr_list;
	struct smc_link *lnk;
	spinlock_t *lgr_lock;
	u8 link_idx;
	int rc = 0;
	int i;

	if (ini->is_smcd && ini->vlan_id) {
		if (smc_ism_get_vlan(ini->ism_dev[ini->ism_selected],
				     ini->vlan_id)) {
			rc = SMC_CLC_DECL_ISMVLANERR;
			goto out;
		}
	}

	lgr = kzalloc(sizeof(*lgr), GFP_KERNEL);
	if (!lgr) {
		rc = SMC_CLC_DECL_MEM;
		goto ism_put_vlan;
	}
	lgr->tx_wq = alloc_workqueue("smc_tx_wq-%*phN", 0, 0,
				     SMC_LGR_ID_SIZE, &lgr->id);
	if (!lgr->tx_wq) {
		rc = -ENOMEM;
		goto free_lgr;
	}
	lgr->is_smcd = ini->is_smcd;
	lgr->sync_err = 0;
	lgr->terminating = 0;
	lgr->freeing = 0;
	lgr->vlan_id = ini->vlan_id;
	mutex_init(&lgr->sndbufs_lock);
	mutex_init(&lgr->rmbs_lock);
	rwlock_init(&lgr->conns_lock);
	for (i = 0; i < SMC_RMBE_SIZES; i++) {
		INIT_LIST_HEAD(&lgr->sndbufs[i]);
		INIT_LIST_HEAD(&lgr->rmbs[i]);
	}
	lgr->next_link_id = 0;
	smc_lgr_list.num += SMC_LGR_NUM_INCR;
	memcpy(&lgr->id, (u8 *)&smc_lgr_list.num, SMC_LGR_ID_SIZE);
	INIT_DELAYED_WORK(&lgr->free_work, smc_lgr_free_work);
	INIT_WORK(&lgr->terminate_work, smc_lgr_terminate_work);
	lgr->conns_all = RB_ROOT;
	if (ini->is_smcd) {
		/* SMC-D specific settings */
		get_device(&ini->ism_dev[ini->ism_selected]->dev);
		lgr->peer_gid = ini->ism_peer_gid[ini->ism_selected];
		lgr->smcd = ini->ism_dev[ini->ism_selected];
		lgr_list = &ini->ism_dev[ini->ism_selected]->lgr_list;
		lgr_lock = &lgr->smcd->lgr_lock;
		lgr->smc_version = ini->smcd_version;
		lgr->peer_shutdown = 0;
		atomic_inc(&ini->ism_dev[ini->ism_selected]->lgr_cnt);
	} else {
		/* SMC-R specific settings */
		lgr->role = smc->listen_smc ? SMC_SERV : SMC_CLNT;
		memcpy(lgr->peer_systemid, ini->ib_lcl->id_for_peer,
		       SMC_SYSTEMID_LEN);
		memcpy(lgr->pnet_id, ini->ib_dev->pnetid[ini->ib_port - 1],
		       SMC_MAX_PNETID_LEN);
		smc_llc_lgr_init(lgr, smc);

		link_idx = SMC_SINGLE_LINK;
		lnk = &lgr->lnk[link_idx];
		rc = smcr_link_init(lgr, lnk, link_idx, ini);
		if (rc)
			goto free_wq;
		lgr_list = &smc_lgr_list.list;
		lgr_lock = &smc_lgr_list.lock;
		atomic_inc(&lgr_cnt);
	}
	smc->conn.lgr = lgr;
	spin_lock_bh(lgr_lock);
	list_add_tail(&lgr->list, lgr_list);
	spin_unlock_bh(lgr_lock);
	return 0;

free_wq:
	destroy_workqueue(lgr->tx_wq);
free_lgr:
	kfree(lgr);
ism_put_vlan:
	if (ini->is_smcd && ini->vlan_id)
		smc_ism_put_vlan(ini->ism_dev[ini->ism_selected], ini->vlan_id);
out:
	if (rc < 0) {
		if (rc == -ENOMEM)
			rc = SMC_CLC_DECL_MEM;
		else
			rc = SMC_CLC_DECL_INTERR;
	}
	return rc;
}

static int smc_write_space(struct smc_connection *conn)
{
	int buffer_len = conn->peer_rmbe_size;
	union smc_host_cursor prod;
	union smc_host_cursor cons;
	int space;

	smc_curs_copy(&prod, &conn->local_tx_ctrl.prod, conn);
	smc_curs_copy(&cons, &conn->local_rx_ctrl.cons, conn);
	/* determine rx_buf space */
	space = buffer_len - smc_curs_diff(buffer_len, &cons, &prod);
	return space;
}

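/* Worked example for the cursor arithmetic above (editorial illustration,
 * assuming smc_curs_diff(len, &old, &new) returns the forward distance
 * from 'old' to 'new' modulo the buffer length): with a 16 KiB peer RMBE,
 * cons at byte 12288 and prod wrapped around to byte 4096, the data
 * produced but not yet consumed amounts to (16384 - 12288) + 4096 = 8192
 * bytes, so smc_write_space() reports 16384 - 8192 = 8192 bytes of free
 * space in the peer's receive buffer.
 */
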
static int smc_switch_cursor(struct smc_sock *smc, struct smc_cdc_tx_pend *pend,
			     struct smc_wr_buf *wr_buf)
{
	struct smc_connection *conn = &smc->conn;
	union smc_host_cursor cons, fin;
	int rc = 0;
	int diff;

	smc_curs_copy(&conn->tx_curs_sent, &conn->tx_curs_fin, conn);
	smc_curs_copy(&fin, &conn->local_tx_ctrl_fin, conn);
	/* set prod cursor to old state, enforce tx_rdma_writes() */
	smc_curs_copy(&conn->local_tx_ctrl.prod, &fin, conn);
	smc_curs_copy(&cons, &conn->local_rx_ctrl.cons, conn);

	if (smc_curs_comp(conn->peer_rmbe_size, &cons, &fin) < 0) {
		/* cons cursor advanced more than fin, and prod was set
		 * equal to fin above, so now prod is smaller than cons.
		 * Fix that.
		 */
		diff = smc_curs_diff(conn->peer_rmbe_size, &fin, &cons);
		smc_curs_add(conn->sndbuf_desc->len,
			     &conn->tx_curs_sent, diff);
		smc_curs_add(conn->sndbuf_desc->len,
			     &conn->tx_curs_fin, diff);

		smp_mb__before_atomic();
		atomic_add(diff, &conn->sndbuf_space);
		smp_mb__after_atomic();

		smc_curs_add(conn->peer_rmbe_size,
			     &conn->local_tx_ctrl.prod, diff);
		smc_curs_add(conn->peer_rmbe_size,
			     &conn->local_tx_ctrl_fin, diff);
	}
	/* recalculate, value is used by tx_rdma_writes() */
	atomic_set(&smc->conn.peer_rmbe_space, smc_write_space(conn));

	if (smc->sk.sk_state != SMC_INIT &&
	    smc->sk.sk_state != SMC_CLOSED) {
		rc = smcr_cdc_msg_send_validation(conn, pend, wr_buf);
		if (!rc) {
			queue_delayed_work(conn->lgr->tx_wq, &conn->tx_work, 0);
			smc->sk.sk_data_ready(&smc->sk);
		}
	} else {
		smc_wr_tx_put_slot(conn->lnk,
				   (struct smc_wr_tx_pend_priv *)pend);
	}
	return rc;
}

void smc_switch_link_and_count(struct smc_connection *conn,
			       struct smc_link *to_lnk)
{
	atomic_dec(&conn->lnk->conn_cnt);
	conn->lnk = to_lnk;
	atomic_inc(&conn->lnk->conn_cnt);
}

struct smc_link *smc_switch_conns(struct smc_link_group *lgr,
				  struct smc_link *from_lnk, bool is_dev_err)
{
	struct smc_link *to_lnk = NULL;
	struct smc_cdc_tx_pend *pend;
	struct smc_connection *conn;
	struct smc_wr_buf *wr_buf;
	struct smc_sock *smc;
	struct rb_node *node;
	int i, rc = 0;

	/* link is inactive, wake up tx waiters */
	smc_wr_wakeup_tx_wait(from_lnk);

	for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
		if (!smc_link_active(&lgr->lnk[i]) || i == from_lnk->link_idx)
			continue;
		if (is_dev_err && from_lnk->smcibdev == lgr->lnk[i].smcibdev &&
		    from_lnk->ibport == lgr->lnk[i].ibport) {
			continue;
		}
		to_lnk = &lgr->lnk[i];
		break;
	}
	if (!to_lnk || !smc_wr_tx_link_hold(to_lnk)) {
		smc_lgr_terminate_sched(lgr);
		return NULL;
	}
again:
	read_lock_bh(&lgr->conns_lock);
	for (node = rb_first(&lgr->conns_all); node; node = rb_next(node)) {
		conn = rb_entry(node, struct smc_connection, alert_node);
		if (conn->lnk != from_lnk)
			continue;
		smc = container_of(conn, struct smc_sock, conn);
		/* conn->lnk not yet set in SMC_INIT state */
		if (smc->sk.sk_state == SMC_INIT)
			continue;
		if (smc->sk.sk_state == SMC_CLOSED ||
		    smc->sk.sk_state == SMC_PEERCLOSEWAIT1 ||
		    smc->sk.sk_state == SMC_PEERCLOSEWAIT2 ||
		    smc->sk.sk_state == SMC_APPFINCLOSEWAIT ||
		    smc->sk.sk_state == SMC_APPCLOSEWAIT1 ||
		    smc->sk.sk_state == SMC_APPCLOSEWAIT2 ||
		    smc->sk.sk_state == SMC_PEERFINCLOSEWAIT ||
		    smc->sk.sk_state == SMC_PEERABORTWAIT ||
		    smc->sk.sk_state == SMC_PROCESSABORT) {
			spin_lock_bh(&conn->send_lock);
			smc_switch_link_and_count(conn, to_lnk);
			spin_unlock_bh(&conn->send_lock);
			continue;
		}
		sock_hold(&smc->sk);
		read_unlock_bh(&lgr->conns_lock);
		/* pre-fetch buffer outside of send_lock, might sleep */
		rc = smc_cdc_get_free_slot(conn, to_lnk, &wr_buf, NULL, &pend);
		if (rc)
			goto err_out;
		/* avoid race with smcr_tx_sndbuf_nonempty() */
		spin_lock_bh(&conn->send_lock);
		smc_switch_link_and_count(conn, to_lnk);
		rc = smc_switch_cursor(smc, pend, wr_buf);
		spin_unlock_bh(&conn->send_lock);
		sock_put(&smc->sk);
		if (rc)
			goto err_out;
		goto again;
	}
	read_unlock_bh(&lgr->conns_lock);
	smc_wr_tx_link_put(to_lnk);
	return to_lnk;

err_out:
	smcr_link_down_cond_sched(to_lnk);
	smc_wr_tx_link_put(to_lnk);
	return NULL;
}

static void smcr_buf_unuse(struct smc_buf_desc *rmb_desc,
			   struct smc_link_group *lgr)
{
	int rc;

	if (rmb_desc->is_conf_rkey && !list_empty(&lgr->list)) {
		/* unregister rmb with peer */
		rc = smc_llc_flow_initiate(lgr, SMC_LLC_FLOW_RKEY);
		if (!rc) {
			/* protect against smc_llc_cli_rkey_exchange() */
			mutex_lock(&lgr->llc_conf_mutex);
			smc_llc_do_delete_rkey(lgr, rmb_desc);
			rmb_desc->is_conf_rkey = false;
			mutex_unlock(&lgr->llc_conf_mutex);
			smc_llc_flow_stop(lgr, &lgr->llc_flow_lcl);
		}
	}

	if (rmb_desc->is_reg_err) {
		/* buf registration failed, reuse not possible */
		mutex_lock(&lgr->rmbs_lock);
		list_del(&rmb_desc->list);
		mutex_unlock(&lgr->rmbs_lock);

		smc_buf_free(lgr, true, rmb_desc);
	} else {
		rmb_desc->used = 0;
	}
}

static void smc_buf_unuse(struct smc_connection *conn,
			  struct smc_link_group *lgr)
{
	if (conn->sndbuf_desc)
		conn->sndbuf_desc->used = 0;
	if (conn->rmb_desc && lgr->is_smcd)
		conn->rmb_desc->used = 0;
	else if (conn->rmb_desc)
		smcr_buf_unuse(conn->rmb_desc, lgr);
}

/* remove a finished connection from its link group */
void smc_conn_free(struct smc_connection *conn)
{
	struct smc_link_group *lgr = conn->lgr;

	if (!lgr)
		return;
	if (lgr->is_smcd) {
		if (!list_empty(&lgr->list))
			smc_ism_unset_conn(conn);
		tasklet_kill(&conn->rx_tsklet);
	} else {
		smc_cdc_tx_dismiss_slots(conn);
		if (current_work() != &conn->abort_work)
			cancel_work_sync(&conn->abort_work);
	}
	if (!list_empty(&lgr->list)) {
		smc_lgr_unregister_conn(conn);
		smc_buf_unuse(conn, lgr); /* allow buffer reuse */
	}

	if (!lgr->conns_num)
		smc_lgr_schedule_free_work(lgr);
}

/* unregister a link from a buf_desc */
static void smcr_buf_unmap_link(struct smc_buf_desc *buf_desc, bool is_rmb,
				struct smc_link *lnk)
{
	if (is_rmb)
		buf_desc->is_reg_mr[lnk->link_idx] = false;
	if (!buf_desc->is_map_ib[lnk->link_idx])
		return;
	if (is_rmb) {
		if (buf_desc->mr_rx[lnk->link_idx]) {
			smc_ib_put_memory_region(
					buf_desc->mr_rx[lnk->link_idx]);
			buf_desc->mr_rx[lnk->link_idx] = NULL;
		}
		smc_ib_buf_unmap_sg(lnk, buf_desc, DMA_FROM_DEVICE);
	} else {
		smc_ib_buf_unmap_sg(lnk, buf_desc, DMA_TO_DEVICE);
	}
	sg_free_table(&buf_desc->sgt[lnk->link_idx]);
	buf_desc->is_map_ib[lnk->link_idx] = false;
}

/* unmap all buffers of lgr for a deleted link */
static void smcr_buf_unmap_lgr(struct smc_link *lnk)
{
	struct smc_link_group *lgr = lnk->lgr;
	struct smc_buf_desc *buf_desc, *bf;
	int i;

	for (i = 0; i < SMC_RMBE_SIZES; i++) {
		mutex_lock(&lgr->rmbs_lock);
		list_for_each_entry_safe(buf_desc, bf, &lgr->rmbs[i], list)
			smcr_buf_unmap_link(buf_desc, true, lnk);
		mutex_unlock(&lgr->rmbs_lock);
		mutex_lock(&lgr->sndbufs_lock);
		list_for_each_entry_safe(buf_desc, bf, &lgr->sndbufs[i],
					 list)
			smcr_buf_unmap_link(buf_desc, false, lnk);
		mutex_unlock(&lgr->sndbufs_lock);
	}
}

static void smcr_rtoken_clear_link(struct smc_link *lnk)
{
	struct smc_link_group *lgr = lnk->lgr;
	int i;

	for (i = 0; i < SMC_RMBS_PER_LGR_MAX; i++) {
		lgr->rtokens[i][lnk->link_idx].rkey = 0;
		lgr->rtokens[i][lnk->link_idx].dma_addr = 0;
	}
}

/* must be called under lgr->llc_conf_mutex lock */
void smcr_link_clear(struct smc_link *lnk, bool log)
{
	struct smc_ib_device *smcibdev;

	if (!lnk->lgr || lnk->state == SMC_LNK_UNUSED)
		return;
	lnk->peer_qpn = 0;
	smc_llc_link_clear(lnk, log);
	smcr_buf_unmap_lgr(lnk);
	smcr_rtoken_clear_link(lnk);
	smc_ib_modify_qp_reset(lnk);
	smc_wr_free_link(lnk);
	smc_ib_destroy_queue_pair(lnk);
	smc_ib_dealloc_protection_domain(lnk);
	smc_wr_free_link_mem(lnk);
	smc_ibdev_cnt_dec(lnk);
	put_device(&lnk->smcibdev->ibdev->dev);
	smcibdev = lnk->smcibdev;
	memset(lnk, 0, sizeof(struct smc_link));
	lnk->state = SMC_LNK_UNUSED;
	if (!atomic_dec_return(&smcibdev->lnk_cnt))
		wake_up(&smcibdev->lnks_deleted);
}

static void smcr_buf_free(struct smc_link_group *lgr, bool is_rmb,
			  struct smc_buf_desc *buf_desc)
{
	int i;

	for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++)
		smcr_buf_unmap_link(buf_desc, is_rmb, &lgr->lnk[i]);

	if (buf_desc->pages)
		__free_pages(buf_desc->pages, buf_desc->order);
	kfree(buf_desc);
}

static void smcd_buf_free(struct smc_link_group *lgr, bool is_dmb,
			  struct smc_buf_desc *buf_desc)
{
	if (is_dmb) {
		/* restore original buf len */
		buf_desc->len += sizeof(struct smcd_cdc_msg);
		smc_ism_unregister_dmb(lgr->smcd, buf_desc);
	} else {
		kfree(buf_desc->cpu_addr);
	}
	kfree(buf_desc);
}

static void smc_buf_free(struct smc_link_group *lgr, bool is_rmb,
			 struct smc_buf_desc *buf_desc)
{
	if (lgr->is_smcd)
		smcd_buf_free(lgr, is_rmb, buf_desc);
	else
		smcr_buf_free(lgr, is_rmb, buf_desc);
}

static void __smc_lgr_free_bufs(struct smc_link_group *lgr, bool is_rmb)
{
	struct smc_buf_desc *buf_desc, *bf_desc;
	struct list_head *buf_list;
	int i;

	for (i = 0; i < SMC_RMBE_SIZES; i++) {
		if (is_rmb)
			buf_list = &lgr->rmbs[i];
		else
			buf_list = &lgr->sndbufs[i];
		list_for_each_entry_safe(buf_desc, bf_desc, buf_list,
					 list) {
			list_del(&buf_desc->list);
			smc_buf_free(lgr, is_rmb, buf_desc);
		}
	}
}

static void smc_lgr_free_bufs(struct smc_link_group *lgr)
{
	/* free send buffers */
	__smc_lgr_free_bufs(lgr, false);
	/* free rmbs */
	__smc_lgr_free_bufs(lgr, true);
}

/* remove a link group */
static void smc_lgr_free(struct smc_link_group *lgr)
{
	int i;

	if (!lgr->is_smcd) {
		mutex_lock(&lgr->llc_conf_mutex);
		for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
			if (lgr->lnk[i].state != SMC_LNK_UNUSED)
				smcr_link_clear(&lgr->lnk[i], false);
		}
		mutex_unlock(&lgr->llc_conf_mutex);
		smc_llc_lgr_clear(lgr);
	}

	smc_lgr_free_bufs(lgr);
	destroy_workqueue(lgr->tx_wq);
	if (lgr->is_smcd) {
		smc_ism_put_vlan(lgr->smcd, lgr->vlan_id);
		put_device(&lgr->smcd->dev);
		if (!atomic_dec_return(&lgr->smcd->lgr_cnt))
			wake_up(&lgr->smcd->lgrs_deleted);
	} else {
		if (!atomic_dec_return(&lgr_cnt))
			wake_up(&lgrs_deleted);
	}
	kfree(lgr);
}

static void smc_sk_wake_ups(struct smc_sock *smc)
{
	smc->sk.sk_write_space(&smc->sk);
	smc->sk.sk_data_ready(&smc->sk);
	smc->sk.sk_state_change(&smc->sk);
}

/* kill a connection */
static void smc_conn_kill(struct smc_connection *conn, bool soft)
{
	struct smc_sock *smc = container_of(conn, struct smc_sock, conn);

	if (conn->lgr->is_smcd && conn->lgr->peer_shutdown)
		conn->local_tx_ctrl.conn_state_flags.peer_conn_abort = 1;
	else
		smc_close_abort(conn);
	conn->killed = 1;
	smc->sk.sk_err = ECONNABORTED;
	smc_sk_wake_ups(smc);
	if (conn->lgr->is_smcd) {
		smc_ism_unset_conn(conn);
		if (soft)
			tasklet_kill(&conn->rx_tsklet);
		else
			tasklet_unlock_wait(&conn->rx_tsklet);
	} else {
		smc_cdc_tx_dismiss_slots(conn);
	}
	smc_lgr_unregister_conn(conn);
	smc_close_active_abort(smc);
}

static void smc_lgr_cleanup(struct smc_link_group *lgr)
{
	if (lgr->is_smcd) {
		smc_ism_signal_shutdown(lgr);
	} else {
		u32 rsn = lgr->llc_termination_rsn;

		if (!rsn)
			rsn = SMC_LLC_DEL_PROG_INIT_TERM;
		smc_llc_send_link_delete_all(lgr, false, rsn);
		smcr_lgr_link_deactivate_all(lgr);
	}
}

/* terminate link group
 * @soft: true if link group shutdown can take its time
 *	  false if immediate link group shutdown is required
 */
static void __smc_lgr_terminate(struct smc_link_group *lgr, bool soft)
{
	struct smc_connection *conn;
	struct smc_sock *smc;
	struct rb_node *node;

	if (lgr->terminating)
		return;	/* lgr already terminating */
	/* cancel free_work sync, will terminate when lgr->freeing is set */
	cancel_delayed_work_sync(&lgr->free_work);
	lgr->terminating = 1;

	/* kill remaining link group connections */
	read_lock_bh(&lgr->conns_lock);
	node = rb_first(&lgr->conns_all);
	while (node) {
		read_unlock_bh(&lgr->conns_lock);
		conn = rb_entry(node, struct smc_connection, alert_node);
		smc = container_of(conn, struct smc_sock, conn);
		sock_hold(&smc->sk); /* sock_put below */
		lock_sock(&smc->sk);
		smc_conn_kill(conn, soft);
		release_sock(&smc->sk);
		sock_put(&smc->sk); /* sock_hold above */
		read_lock_bh(&lgr->conns_lock);
		node = rb_first(&lgr->conns_all);
	}
	read_unlock_bh(&lgr->conns_lock);
	smc_lgr_cleanup(lgr);
	smc_lgr_free(lgr);
}

/* unlink link group and schedule termination */
void smc_lgr_terminate_sched(struct smc_link_group *lgr)
{
	spinlock_t *lgr_lock;

	smc_lgr_list_head(lgr, &lgr_lock);
	spin_lock_bh(lgr_lock);
	if (list_empty(&lgr->list) || lgr->terminating || lgr->freeing) {
		spin_unlock_bh(lgr_lock);
		return;	/* lgr already terminating */
	}
	list_del_init(&lgr->list);
	lgr->freeing = 1;
	spin_unlock_bh(lgr_lock);
	schedule_work(&lgr->terminate_work);
}

/* Called when peer lgr shutdown (regularly or abnormally) is received */
void smc_smcd_terminate(struct smcd_dev *dev, u64 peer_gid, unsigned short vlan)
{
	struct smc_link_group *lgr, *l;
	LIST_HEAD(lgr_free_list);

	/* run common cleanup function and build free list */
	spin_lock_bh(&dev->lgr_lock);
	list_for_each_entry_safe(lgr, l, &dev->lgr_list, list) {
		if ((!peer_gid || lgr->peer_gid == peer_gid) &&
		    (vlan == VLAN_VID_MASK || lgr->vlan_id == vlan)) {
			if (peer_gid) /* peer triggered termination */
				lgr->peer_shutdown = 1;
			list_move(&lgr->list, &lgr_free_list);
			lgr->freeing = 1;
		}
	}
	spin_unlock_bh(&dev->lgr_lock);

	/* cancel the regular free workers and actually free lgrs */
	list_for_each_entry_safe(lgr, l, &lgr_free_list, list) {
		list_del_init(&lgr->list);
		schedule_work(&lgr->terminate_work);
	}
}

/* Called when an SMCD device is removed or the smc module is unloaded */
void smc_smcd_terminate_all(struct smcd_dev *smcd)
{
	struct smc_link_group *lgr, *lg;
	LIST_HEAD(lgr_free_list);

	spin_lock_bh(&smcd->lgr_lock);
	list_splice_init(&smcd->lgr_list, &lgr_free_list);
	list_for_each_entry(lgr, &lgr_free_list, list)
		lgr->freeing = 1;
	spin_unlock_bh(&smcd->lgr_lock);

	list_for_each_entry_safe(lgr, lg, &lgr_free_list, list) {
		list_del_init(&lgr->list);
		__smc_lgr_terminate(lgr, false);
	}

	if (atomic_read(&smcd->lgr_cnt))
		wait_event(smcd->lgrs_deleted, !atomic_read(&smcd->lgr_cnt));
}

/* Called when an SMCR device is removed or the smc module is unloaded.
 * If smcibdev is given, all SMCR link groups using this device are terminated.
 * If smcibdev is NULL, all SMCR link groups are terminated.
 */
void smc_smcr_terminate_all(struct smc_ib_device *smcibdev)
{
	struct smc_link_group *lgr, *lg;
	LIST_HEAD(lgr_free_list);
	int i;

	spin_lock_bh(&smc_lgr_list.lock);
	if (!smcibdev) {
		list_splice_init(&smc_lgr_list.list, &lgr_free_list);
		list_for_each_entry(lgr, &lgr_free_list, list)
			lgr->freeing = 1;
	} else {
		list_for_each_entry_safe(lgr, lg, &smc_lgr_list.list, list) {
			for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
				if (lgr->lnk[i].smcibdev == smcibdev)
					smcr_link_down_cond_sched(&lgr->lnk[i]);
			}
		}
	}
	spin_unlock_bh(&smc_lgr_list.lock);

	list_for_each_entry_safe(lgr, lg, &lgr_free_list, list) {
		list_del_init(&lgr->list);
		smc_llc_set_termination_rsn(lgr, SMC_LLC_DEL_OP_INIT_TERM);
		__smc_lgr_terminate(lgr, false);
	}

	if (smcibdev) {
		if (atomic_read(&smcibdev->lnk_cnt))
			wait_event(smcibdev->lnks_deleted,
				   !atomic_read(&smcibdev->lnk_cnt));
	} else {
		if (atomic_read(&lgr_cnt))
			wait_event(lgrs_deleted, !atomic_read(&lgr_cnt));
	}
}

/* set new lgr type and clear all asymmetric link tagging */
void smcr_lgr_set_type(struct smc_link_group *lgr, enum smc_lgr_type new_type)
{
	char *lgr_type = "";
	int i;

	for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++)
		if (smc_link_usable(&lgr->lnk[i]))
			lgr->lnk[i].link_is_asym = false;
	if (lgr->type == new_type)
		return;
	lgr->type = new_type;

	switch (lgr->type) {
	case SMC_LGR_NONE:
		lgr_type = "NONE";
		break;
	case SMC_LGR_SINGLE:
		lgr_type = "SINGLE";
		break;
	case SMC_LGR_SYMMETRIC:
		lgr_type = "SYMMETRIC";
		break;
	case SMC_LGR_ASYMMETRIC_PEER:
		lgr_type = "ASYMMETRIC_PEER";
		break;
	case SMC_LGR_ASYMMETRIC_LOCAL:
		lgr_type = "ASYMMETRIC_LOCAL";
		break;
	}
	pr_warn_ratelimited("smc: SMC-R lg %*phN state changed: "
			    "%s, pnetid %.16s\n", SMC_LGR_ID_SIZE, &lgr->id,
			    lgr_type, lgr->pnet_id);
}

/* set new lgr type and tag a link as asymmetric */
void smcr_lgr_set_type_asym(struct smc_link_group *lgr,
			    enum smc_lgr_type new_type, int asym_lnk_idx)
{
	smcr_lgr_set_type(lgr, new_type);
	lgr->lnk[asym_lnk_idx].link_is_asym = true;
}

/* abort connection, abort_work scheduled from tasklet context */
static void smc_conn_abort_work(struct work_struct *work)
{
	struct smc_connection *conn = container_of(work,
						   struct smc_connection,
						   abort_work);
	struct smc_sock *smc = container_of(conn, struct smc_sock, conn);

	lock_sock(&smc->sk);
	smc_conn_kill(conn, true);
	release_sock(&smc->sk);
	sock_put(&smc->sk); /* sock_hold done by schedulers of abort_work */
}

void smcr_port_add(struct smc_ib_device *smcibdev, u8 ibport)
{
	struct smc_link_group *lgr, *n;

	list_for_each_entry_safe(lgr, n, &smc_lgr_list.list, list) {
		struct smc_link *link;

		if (strncmp(smcibdev->pnetid[ibport - 1], lgr->pnet_id,
			    SMC_MAX_PNETID_LEN) ||
		    lgr->type == SMC_LGR_SYMMETRIC ||
		    lgr->type == SMC_LGR_ASYMMETRIC_PEER)
			continue;

		/* trigger local add link processing */
		link = smc_llc_usable_link(lgr);
		if (link)
			smc_llc_add_link_local(link);
	}
}

/* link is down - switch connections to alternate link,
 * must be called under lgr->llc_conf_mutex lock
 */
static void smcr_link_down(struct smc_link *lnk)
{
	struct smc_link_group *lgr = lnk->lgr;
	struct smc_link *to_lnk;
	int del_link_id;

	if (!lgr || lnk->state == SMC_LNK_UNUSED || list_empty(&lgr->list))
		return;

	smc_ib_modify_qp_reset(lnk);
	to_lnk = smc_switch_conns(lgr, lnk, true);
	if (!to_lnk) { /* no backup link available */
		smcr_link_clear(lnk, true);
		return;
	}
	smcr_lgr_set_type(lgr, SMC_LGR_SINGLE);
	del_link_id = lnk->link_id;

	if (lgr->role == SMC_SERV) {
		/* trigger local delete link processing */
		smc_llc_srv_delete_link_local(to_lnk, del_link_id);
	} else {
		if (lgr->llc_flow_lcl.type != SMC_LLC_FLOW_NONE) {
			/* another llc task is ongoing */
			mutex_unlock(&lgr->llc_conf_mutex);
			wait_event_timeout(lgr->llc_flow_waiter,
					   (list_empty(&lgr->list) ||
					    lgr->llc_flow_lcl.type == SMC_LLC_FLOW_NONE),
					   SMC_LLC_WAIT_TIME);
			mutex_lock(&lgr->llc_conf_mutex);
		}
		if (!list_empty(&lgr->list)) {
			smc_llc_send_delete_link(to_lnk, del_link_id,
						 SMC_LLC_REQ, true,
						 SMC_LLC_DEL_LOST_PATH);
			smcr_link_clear(lnk, true);
		}
		wake_up(&lgr->llc_flow_waiter);	/* wake up next waiter */
	}
}

/* must be called under lgr->llc_conf_mutex lock */
void smcr_link_down_cond(struct smc_link *lnk)
{
	if (smc_link_downing(&lnk->state))
		smcr_link_down(lnk);
}

/* will get the lgr->llc_conf_mutex lock */
void smcr_link_down_cond_sched(struct smc_link *lnk)
{
	if (smc_link_downing(&lnk->state))
		schedule_work(&lnk->link_down_wrk);
}

void smcr_port_err(struct smc_ib_device *smcibdev, u8 ibport)
{
	struct smc_link_group *lgr, *n;
	int i;

	list_for_each_entry_safe(lgr, n, &smc_lgr_list.list, list) {
		if (strncmp(smcibdev->pnetid[ibport - 1], lgr->pnet_id,
			    SMC_MAX_PNETID_LEN))
			continue; /* lgr is not affected */
		if (list_empty(&lgr->list))
			continue;
		for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
			struct smc_link *lnk = &lgr->lnk[i];

			if (smc_link_usable(lnk) &&
			    lnk->smcibdev == smcibdev && lnk->ibport == ibport)
				smcr_link_down_cond_sched(lnk);
		}
	}
}

static void smc_link_down_work(struct work_struct *work)
{
	struct smc_link *link = container_of(work, struct smc_link,
					     link_down_wrk);
	struct smc_link_group *lgr = link->lgr;

	if (list_empty(&lgr->list))
		return;
	wake_up_all(&lgr->llc_msg_waiter);
	mutex_lock(&lgr->llc_conf_mutex);
	smcr_link_down(link);
	mutex_unlock(&lgr->llc_conf_mutex);
}

static int smc_vlan_by_tcpsk_walk(struct net_device *lower_dev,
				  struct netdev_nested_priv *priv)
{
	unsigned short *vlan_id = (unsigned short *)priv->data;

	if (is_vlan_dev(lower_dev)) {
		*vlan_id = vlan_dev_vlan_id(lower_dev);
		return 1;
	}

	return 0;
}

/* Determine vlan of internal TCP socket. */
int smc_vlan_by_tcpsk(struct socket *clcsock, struct smc_init_info *ini)
{
	struct dst_entry *dst = sk_dst_get(clcsock->sk);
	struct netdev_nested_priv priv;
	struct net_device *ndev;
	int rc = 0;

	ini->vlan_id = 0;
	if (!dst) {
		rc = -ENOTCONN;
		goto out;
	}
	if (!dst->dev) {
		rc = -ENODEV;
		goto out_rel;
	}

	ndev = dst->dev;
	if (is_vlan_dev(ndev)) {
		ini->vlan_id = vlan_dev_vlan_id(ndev);
		goto out_rel;
	}

	priv.data = (void *)&ini->vlan_id;
	rtnl_lock();
	netdev_walk_all_lower_dev(ndev, smc_vlan_by_tcpsk_walk, &priv);
	rtnl_unlock();

out_rel:
	dst_release(dst);
out:
	return rc;
}

static bool smcr_lgr_match(struct smc_link_group *lgr,
			   struct smc_clc_msg_local *lcl,
			   enum smc_lgr_role role, u32 clcqpn)
{
	int i;

	if (memcmp(lgr->peer_systemid, lcl->id_for_peer, SMC_SYSTEMID_LEN) ||
	    lgr->role != role)
		return false;

	for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
		if (!smc_link_active(&lgr->lnk[i]))
			continue;
		if ((lgr->role == SMC_SERV || lgr->lnk[i].peer_qpn == clcqpn) &&
		    !memcmp(lgr->lnk[i].peer_gid, &lcl->gid, SMC_GID_SIZE) &&
		    !memcmp(lgr->lnk[i].peer_mac, lcl->mac, sizeof(lcl->mac)))
			return true;
	}
	return false;
}

static bool smcd_lgr_match(struct smc_link_group *lgr,
			   struct smcd_dev *smcismdev, u64 peer_gid)
{
	return lgr->peer_gid == peer_gid && lgr->smcd == smcismdev;
}

/* create a new SMC connection (and a new link group if necessary) */
int smc_conn_create(struct smc_sock *smc, struct smc_init_info *ini)
{
	struct smc_connection *conn = &smc->conn;
	struct list_head *lgr_list;
	struct smc_link_group *lgr;
	enum smc_lgr_role role;
	spinlock_t *lgr_lock;
	int rc = 0;

	lgr_list = ini->is_smcd ? &ini->ism_dev[ini->ism_selected]->lgr_list :
				  &smc_lgr_list.list;
	lgr_lock = ini->is_smcd ? &ini->ism_dev[ini->ism_selected]->lgr_lock :
				  &smc_lgr_list.lock;
	ini->first_contact_local = 1;
	role = smc->listen_smc ? SMC_SERV : SMC_CLNT;
	if (role == SMC_CLNT && ini->first_contact_peer)
		/* create new link group as well */
		goto create;

	/* determine if an existing link group can be reused */
	spin_lock_bh(lgr_lock);
	list_for_each_entry(lgr, lgr_list, list) {
		write_lock_bh(&lgr->conns_lock);
		if ((ini->is_smcd ?
		     smcd_lgr_match(lgr, ini->ism_dev[ini->ism_selected],
				    ini->ism_peer_gid[ini->ism_selected]) :
		     smcr_lgr_match(lgr, ini->ib_lcl, role, ini->ib_clcqpn)) &&
		    !lgr->sync_err &&
		    (ini->smcd_version == SMC_V2 ||
		     lgr->vlan_id == ini->vlan_id) &&
		    (role == SMC_CLNT || ini->is_smcd ||
		     lgr->conns_num < SMC_RMBS_PER_LGR_MAX)) {
			/* link group found */
			ini->first_contact_local = 0;
			conn->lgr = lgr;
			rc = smc_lgr_register_conn(conn, false);
			write_unlock_bh(&lgr->conns_lock);
			if (!rc && delayed_work_pending(&lgr->free_work))
				cancel_delayed_work(&lgr->free_work);
			break;
		}
		write_unlock_bh(&lgr->conns_lock);
	}
	spin_unlock_bh(lgr_lock);
	if (rc)
		return rc;

	if (role == SMC_CLNT && !ini->first_contact_peer &&
	    ini->first_contact_local) {
		/* Server reuses a link group, but Client wants to start
		 * a new one
		 * send out_of_sync decline, reason synchr. error
		 */
		return SMC_CLC_DECL_SYNCERR;
	}

create:
	if (ini->first_contact_local) {
		rc = smc_lgr_create(smc, ini);
		if (rc)
			goto out;
		lgr = conn->lgr;
		write_lock_bh(&lgr->conns_lock);
		rc = smc_lgr_register_conn(conn, true);
		write_unlock_bh(&lgr->conns_lock);
		if (rc)
			goto out;
	}
	conn->local_tx_ctrl.common.type = SMC_CDC_MSG_TYPE;
	conn->local_tx_ctrl.len = SMC_WR_TX_SIZE;
	conn->urg_state = SMC_URG_READ;
	INIT_WORK(&smc->conn.abort_work, smc_conn_abort_work);
	if (ini->is_smcd) {
		conn->rx_off = sizeof(struct smcd_cdc_msg);
		smcd_cdc_rx_init(conn); /* init tasklet for this conn */
	} else {
		conn->rx_off = 0;
	}
#ifndef KERNEL_HAS_ATOMIC64
	spin_lock_init(&conn->acurs_lock);
#endif

out:
	return rc;
}

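/* Summary of the first-contact decisions in smc_conn_create() above
 * (editorial illustration, derived directly from the code):
 *
 *	role   first_contact_peer  lgr found  result
 *	CLNT   1                   -          create new link group
 *	CLNT   0                   yes        reuse link group
 *	CLNT   0                   no         SMC_CLC_DECL_SYNCERR
 *	SERV   -                   yes        reuse link group
 *	SERV   -                   no         create new link group
 */
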
#define SMCD_DMBE_SIZES		6 /* 0 -> 16KB, 1 -> 32KB, .. 6 -> 1MB */
#define SMCR_RMBE_SIZES		5 /* 0 -> 16KB, 1 -> 32KB, .. 5 -> 512KB */

/* convert the RMB size into the compressed notation (minimum 16K, see
 * SMCD/R_DMBE_SIZES).
 * In contrast to plain ilog2, this rounds towards the next power of 2,
 * so the socket application gets at least its desired sndbuf / rcvbuf size.
 */
static u8 smc_compress_bufsize(int size, bool is_smcd, bool is_rmb)
{
	const unsigned int max_scat = SG_MAX_SINGLE_ALLOC * PAGE_SIZE;
	u8 compressed;

	if (size <= SMC_BUF_MIN_SIZE)
		return 0;

	size = (size - 1) >> 14;  /* convert to 16K multiple */
	compressed = min_t(u8, ilog2(size) + 1,
			   is_smcd ? SMCD_DMBE_SIZES : SMCR_RMBE_SIZES);

	if (!is_smcd && is_rmb)
		/* RMBs are backed by & limited to max size of scatterlists */
		compressed = min_t(u8, compressed, ilog2(max_scat >> 14));

	return compressed;
}

/* convert the RMB size from compressed notation into integer */
int smc_uncompress_bufsize(u8 compressed)
{
	u32 size;

	size = 0x00000001 << (((int)compressed) + 14);
	return (int)size;
}

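/* Worked example for the compressed buffer notation above (editorial
 * illustration): a request for 100000 bytes becomes
 * (100000 - 1) >> 14 = 6 blocks of 16 KiB, ilog2(6) + 1 = 3, so
 * smc_compress_bufsize() returns 3 and smc_uncompress_bufsize(3) yields
 * 1 << (3 + 14) = 131072 bytes - the next power of two at or above the
 * requested size, as the comment above promises. A hypothetical
 * self-test could assert the round trip:
 *
 *	WARN_ON(smc_uncompress_bufsize(
 *			smc_compress_bufsize(100000, false, false)) < 100000);
 */
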
/* try to reuse a sndbuf or rmb description slot for a certain
 * buffer size; if not available, return NULL
 */
static struct smc_buf_desc *smc_buf_get_slot(int compressed_bufsize,
					     struct mutex *lock,
					     struct list_head *buf_list)
{
	struct smc_buf_desc *buf_slot;

	mutex_lock(lock);
	list_for_each_entry(buf_slot, buf_list, list) {
		if (cmpxchg(&buf_slot->used, 0, 1) == 0) {
			mutex_unlock(lock);
			return buf_slot;
		}
	}
	mutex_unlock(lock);
	return NULL;
}

/* one of the conditions for announcing a receiver's current window size is
 * that it "results in a minimum increase in the window size of 10% of the
 * receive buffer space" [RFC7609]
 */
static inline int smc_rmb_wnd_update_limit(int rmbe_size)
{
	return min_t(int, rmbe_size / 10, SOCK_MIN_SNDBUF / 2);
}

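/* Worked example (editorial illustration): for a 64 KiB RMB the 10% rule
 * above yields 65536 / 10 = 6553 bytes, but the result is capped at
 * SOCK_MIN_SNDBUF / 2, so the smaller of the two values becomes the
 * rmbe_update_limit. Window updates are thus suppressed until at least
 * that many bytes of receive buffer space have been freed.
 */
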
/* map an rmb buf to a link */
static int smcr_buf_map_link(struct smc_buf_desc *buf_desc, bool is_rmb,
			     struct smc_link *lnk)
{
	int rc;

	if (buf_desc->is_map_ib[lnk->link_idx])
		return 0;

	rc = sg_alloc_table(&buf_desc->sgt[lnk->link_idx], 1, GFP_KERNEL);
	if (rc)
		return rc;
	sg_set_buf(buf_desc->sgt[lnk->link_idx].sgl,
		   buf_desc->cpu_addr, buf_desc->len);

	/* map sg table to DMA address */
	rc = smc_ib_buf_map_sg(lnk, buf_desc,
			       is_rmb ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
	/* SMC protocol depends on mapping to one DMA address only */
	if (rc != 1) {
		rc = -EAGAIN;
		goto free_table;
	}

	/* create a new memory region for the RMB */
	if (is_rmb) {
		rc = smc_ib_get_memory_region(lnk->roce_pd,
					      IB_ACCESS_REMOTE_WRITE |
					      IB_ACCESS_LOCAL_WRITE,
					      buf_desc, lnk->link_idx);
		if (rc)
			goto buf_unmap;
		smc_ib_sync_sg_for_device(lnk, buf_desc, DMA_FROM_DEVICE);
	}
	buf_desc->is_map_ib[lnk->link_idx] = true;
	return 0;

buf_unmap:
	smc_ib_buf_unmap_sg(lnk, buf_desc,
			    is_rmb ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
free_table:
	sg_free_table(&buf_desc->sgt[lnk->link_idx]);
	return rc;
}

/* register a new rmb on IB device,
 * must be called under lgr->llc_conf_mutex lock
 */
int smcr_link_reg_rmb(struct smc_link *link, struct smc_buf_desc *rmb_desc)
{
	if (list_empty(&link->lgr->list))
		return -ENOLINK;
	if (!rmb_desc->is_reg_mr[link->link_idx]) {
		/* register memory region for new rmb */
		if (smc_wr_reg_send(link, rmb_desc->mr_rx[link->link_idx])) {
			rmb_desc->is_reg_err = true;
			return -EFAULT;
		}
		rmb_desc->is_reg_mr[link->link_idx] = true;
	}
	return 0;
}

static int _smcr_buf_map_lgr(struct smc_link *lnk, struct mutex *lock,
			     struct list_head *lst, bool is_rmb)
{
	struct smc_buf_desc *buf_desc, *bf;
	int rc = 0;

	mutex_lock(lock);
	list_for_each_entry_safe(buf_desc, bf, lst, list) {
		if (!buf_desc->used)
			continue;
		rc = smcr_buf_map_link(buf_desc, is_rmb, lnk);
		if (rc)
			goto out;
	}
out:
	mutex_unlock(lock);
	return rc;
}

/* map all used buffers of lgr for a new link */
int smcr_buf_map_lgr(struct smc_link *lnk)
{
	struct smc_link_group *lgr = lnk->lgr;
	int i, rc = 0;

	for (i = 0; i < SMC_RMBE_SIZES; i++) {
		rc = _smcr_buf_map_lgr(lnk, &lgr->rmbs_lock,
				       &lgr->rmbs[i], true);
		if (rc)
			return rc;
		rc = _smcr_buf_map_lgr(lnk, &lgr->sndbufs_lock,
				       &lgr->sndbufs[i], false);
		if (rc)
			return rc;
	}
	return 0;
}

/* register all used buffers of lgr for a new link,
 * must be called under lgr->llc_conf_mutex lock
 */
int smcr_buf_reg_lgr(struct smc_link *lnk)
{
	struct smc_link_group *lgr = lnk->lgr;
	struct smc_buf_desc *buf_desc, *bf;
	int i, rc = 0;

	mutex_lock(&lgr->rmbs_lock);
	for (i = 0; i < SMC_RMBE_SIZES; i++) {
		list_for_each_entry_safe(buf_desc, bf, &lgr->rmbs[i], list) {
			if (!buf_desc->used)
				continue;
			rc = smcr_link_reg_rmb(lnk, buf_desc);
			if (rc)
				goto out;
		}
	}
out:
	mutex_unlock(&lgr->rmbs_lock);
	return rc;
}

static struct smc_buf_desc *smcr_new_buf_create(struct smc_link_group *lgr,
						bool is_rmb, int bufsize)
{
	struct smc_buf_desc *buf_desc;

	/* try to alloc a new buffer */
	buf_desc = kzalloc(sizeof(*buf_desc), GFP_KERNEL);
	if (!buf_desc)
		return ERR_PTR(-ENOMEM);

	buf_desc->order = get_order(bufsize);
	buf_desc->pages = alloc_pages(GFP_KERNEL | __GFP_NOWARN |
				      __GFP_NOMEMALLOC | __GFP_COMP |
				      __GFP_NORETRY | __GFP_ZERO,
				      buf_desc->order);
	if (!buf_desc->pages) {
		kfree(buf_desc);
		return ERR_PTR(-EAGAIN);
	}
	buf_desc->cpu_addr = (void *)page_address(buf_desc->pages);
	buf_desc->len = bufsize;
	return buf_desc;
}

/* map buf_desc on all usable links,
 * unused buffers stay mapped as long as the link is up
 */
static int smcr_buf_map_usable_links(struct smc_link_group *lgr,
				     struct smc_buf_desc *buf_desc, bool is_rmb)
{
	int i, rc = 0;

	/* protect against parallel link reconfiguration */
	mutex_lock(&lgr->llc_conf_mutex);
	for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
		struct smc_link *lnk = &lgr->lnk[i];

		if (!smc_link_usable(lnk))
			continue;
		if (smcr_buf_map_link(buf_desc, is_rmb, lnk)) {
			rc = -ENOMEM;
			goto out;
		}
	}
out:
	mutex_unlock(&lgr->llc_conf_mutex);
	return rc;
}

static struct smc_buf_desc *smcd_new_buf_create(struct smc_link_group *lgr,
						bool is_dmb, int bufsize)
{
	struct smc_buf_desc *buf_desc;
	int rc;

	/* try to alloc a new DMB */
	buf_desc = kzalloc(sizeof(*buf_desc), GFP_KERNEL);
	if (!buf_desc)
		return ERR_PTR(-ENOMEM);
	if (is_dmb) {
		rc = smc_ism_register_dmb(lgr, bufsize, buf_desc);
		if (rc) {
			kfree(buf_desc);
			if (rc == -ENOMEM)
				return ERR_PTR(-EAGAIN);
			if (rc == -ENOSPC)
				return ERR_PTR(-ENOSPC);
			return ERR_PTR(-EIO);
		}
		buf_desc->pages = virt_to_page(buf_desc->cpu_addr);
		/* CDC header stored in buf. So, pretend it was smaller */
		buf_desc->len = bufsize - sizeof(struct smcd_cdc_msg);
	} else {
		buf_desc->cpu_addr = kzalloc(bufsize, GFP_KERNEL |
					     __GFP_NOWARN | __GFP_NORETRY |
					     __GFP_NOMEMALLOC);
		if (!buf_desc->cpu_addr) {
			kfree(buf_desc);
			return ERR_PTR(-EAGAIN);
		}
		buf_desc->len = bufsize;
	}
	return buf_desc;
}

static int __smc_buf_create(struct smc_sock *smc, bool is_smcd, bool is_rmb)
{
	struct smc_buf_desc *buf_desc = ERR_PTR(-ENOMEM);
	struct smc_connection *conn = &smc->conn;
	struct smc_link_group *lgr = conn->lgr;
	struct list_head *buf_list;
	int bufsize, bufsize_short;
	bool is_dgraded = false;
	struct mutex *lock;	/* lock buffer list */
	int sk_buf_size;

	if (is_rmb)
		/* use socket recv buffer size (w/o overhead) as start value */
		sk_buf_size = smc->sk.sk_rcvbuf / 2;
	else
		/* use socket send buffer size (w/o overhead) as start value */
		sk_buf_size = smc->sk.sk_sndbuf / 2;

	for (bufsize_short = smc_compress_bufsize(sk_buf_size, is_smcd, is_rmb);
	     bufsize_short >= 0; bufsize_short--) {
		if (is_rmb) {
			lock = &lgr->rmbs_lock;
			buf_list = &lgr->rmbs[bufsize_short];
		} else {
			lock = &lgr->sndbufs_lock;
			buf_list = &lgr->sndbufs[bufsize_short];
		}
		bufsize = smc_uncompress_bufsize(bufsize_short);

		/* check for reusable slot in the link group */
		buf_desc = smc_buf_get_slot(bufsize_short, lock, buf_list);
		if (buf_desc) {
			SMC_STAT_RMB_SIZE(smc, is_smcd, is_rmb, bufsize);
			SMC_STAT_BUF_REUSE(smc, is_smcd, is_rmb);
			memset(buf_desc->cpu_addr, 0, bufsize);
			break; /* found reusable slot */
		}

		if (is_smcd)
			buf_desc = smcd_new_buf_create(lgr, is_rmb, bufsize);
		else
			buf_desc = smcr_new_buf_create(lgr, is_rmb, bufsize);

		if (PTR_ERR(buf_desc) == -ENOMEM)
			break;
		if (IS_ERR(buf_desc)) {
			if (!is_dgraded) {
				is_dgraded = true;
				SMC_STAT_RMB_DOWNGRADED(smc, is_smcd, is_rmb);
			}
			continue;
		}

		SMC_STAT_RMB_ALLOC(smc, is_smcd, is_rmb);
		SMC_STAT_RMB_SIZE(smc, is_smcd, is_rmb, bufsize);
		buf_desc->used = 1;
		mutex_lock(lock);
		list_add(&buf_desc->list, buf_list);
		mutex_unlock(lock);
		break; /* found */
	}

	if (IS_ERR(buf_desc))
		return PTR_ERR(buf_desc);

	if (!is_smcd) {
		if (smcr_buf_map_usable_links(lgr, buf_desc, is_rmb)) {
			smcr_buf_unuse(buf_desc, lgr);
			return -ENOMEM;
		}
	}

	if (is_rmb) {
		conn->rmb_desc = buf_desc;
		conn->rmbe_size_short = bufsize_short;
		smc->sk.sk_rcvbuf = bufsize * 2;
		atomic_set(&conn->bytes_to_rcv, 0);
		conn->rmbe_update_limit =
			smc_rmb_wnd_update_limit(buf_desc->len);
		if (is_smcd)
			smc_ism_set_conn(conn); /* map RMB/smcd_dev to conn */
	} else {
		conn->sndbuf_desc = buf_desc;
		smc->sk.sk_sndbuf = bufsize * 2;
		atomic_set(&conn->sndbuf_space, bufsize);
	}
	return 0;
}

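/* Worked example for the allocation loop above (editorial illustration):
 * with sk_rcvbuf = 512 KiB the start value is 256 KiB (compressed size 4
 * for SMC-R). If neither a reusable slot nor a fresh 256 KiB buffer is
 * available, bufsize_short counts down and the loop retries with
 * 128 KiB, 64 KiB, ... down to 16 KiB, recording a downgrade in the
 * statistics the first time it has to settle for less than requested.
 */
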
void smc_sndbuf_sync_sg_for_cpu(struct smc_connection *conn)
{
	if (!conn->lgr || conn->lgr->is_smcd || !smc_link_active(conn->lnk))
		return;
	smc_ib_sync_sg_for_cpu(conn->lnk, conn->sndbuf_desc, DMA_TO_DEVICE);
}

void smc_sndbuf_sync_sg_for_device(struct smc_connection *conn)
{
	if (!conn->lgr || conn->lgr->is_smcd || !smc_link_active(conn->lnk))
		return;
	smc_ib_sync_sg_for_device(conn->lnk, conn->sndbuf_desc, DMA_TO_DEVICE);
}

void smc_rmb_sync_sg_for_cpu(struct smc_connection *conn)
{
	int i;

	if (!conn->lgr || conn->lgr->is_smcd)
		return;
	for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
		if (!smc_link_active(&conn->lgr->lnk[i]))
			continue;
		smc_ib_sync_sg_for_cpu(&conn->lgr->lnk[i], conn->rmb_desc,
				       DMA_FROM_DEVICE);
	}
}

void smc_rmb_sync_sg_for_device(struct smc_connection *conn)
{
	int i;

	if (!conn->lgr || conn->lgr->is_smcd)
		return;
	for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
		if (!smc_link_active(&conn->lgr->lnk[i]))
			continue;
		smc_ib_sync_sg_for_device(&conn->lgr->lnk[i], conn->rmb_desc,
					  DMA_FROM_DEVICE);
	}
}

/* create the send and receive buffer for an SMC socket;
 * receive buffers are called RMBs;
 * (even though the SMC protocol allows more than one RMB-element per RMB,
 * the Linux implementation uses just one RMB-element per RMB, i.e. uses an
 * extra RMB for every connection in a link group)
 */
int smc_buf_create(struct smc_sock *smc, bool is_smcd)
{
	int rc;

	/* create send buffer */
	rc = __smc_buf_create(smc, is_smcd, false);
	if (rc)
		return rc;
	/* create rmb */
	rc = __smc_buf_create(smc, is_smcd, true);
	if (rc) {
		mutex_lock(&smc->conn.lgr->sndbufs_lock);
		list_del(&smc->conn.sndbuf_desc->list);
		mutex_unlock(&smc->conn.lgr->sndbufs_lock);
		smc_buf_free(smc->conn.lgr, false, smc->conn.sndbuf_desc);
		smc->conn.sndbuf_desc = NULL;
	}
	return rc;
}

static inline int smc_rmb_reserve_rtoken_idx(struct smc_link_group *lgr)
{
	int i;

	for_each_clear_bit(i, lgr->rtokens_used_mask, SMC_RMBS_PER_LGR_MAX) {
		if (!test_and_set_bit(i, lgr->rtokens_used_mask))
			return i;
	}
	return -ENOSPC;
}

static int smc_rtoken_find_by_link(struct smc_link_group *lgr, int lnk_idx,
				   u32 rkey)
{
	int i;

	for (i = 0; i < SMC_RMBS_PER_LGR_MAX; i++) {
		if (test_bit(i, lgr->rtokens_used_mask) &&
		    lgr->rtokens[i][lnk_idx].rkey == rkey)
			return i;
	}
	return -ENOENT;
}

/* set rtoken for a new link to an existing rmb */
void smc_rtoken_set(struct smc_link_group *lgr, int link_idx, int link_idx_new,
		    __be32 nw_rkey_known, __be64 nw_vaddr, __be32 nw_rkey)
{
	int rtok_idx;

	rtok_idx = smc_rtoken_find_by_link(lgr, link_idx, ntohl(nw_rkey_known));
	if (rtok_idx == -ENOENT)
		return;
	lgr->rtokens[rtok_idx][link_idx_new].rkey = ntohl(nw_rkey);
	lgr->rtokens[rtok_idx][link_idx_new].dma_addr = be64_to_cpu(nw_vaddr);
}

/* set rtoken for a new link whose link_id is given */
void smc_rtoken_set2(struct smc_link_group *lgr, int rtok_idx, int link_id,
		     __be64 nw_vaddr, __be32 nw_rkey)
{
	u64 dma_addr = be64_to_cpu(nw_vaddr);
	u32 rkey = ntohl(nw_rkey);
	bool found = false;
	int link_idx;

	for (link_idx = 0; link_idx < SMC_LINKS_PER_LGR_MAX; link_idx++) {
		if (lgr->lnk[link_idx].link_id == link_id) {
			found = true;
			break;
		}
	}
	if (!found)
		return;
	lgr->rtokens[rtok_idx][link_idx].rkey = rkey;
	lgr->rtokens[rtok_idx][link_idx].dma_addr = dma_addr;
}

/* add a new rtoken from peer */
int smc_rtoken_add(struct smc_link *lnk, __be64 nw_vaddr, __be32 nw_rkey)
{
	struct smc_link_group *lgr = smc_get_lgr(lnk);
	u64 dma_addr = be64_to_cpu(nw_vaddr);
	u32 rkey = ntohl(nw_rkey);
	int i;

	for (i = 0; i < SMC_RMBS_PER_LGR_MAX; i++) {
		if (lgr->rtokens[i][lnk->link_idx].rkey == rkey &&
		    lgr->rtokens[i][lnk->link_idx].dma_addr == dma_addr &&
		    test_bit(i, lgr->rtokens_used_mask)) {
			/* already in list */
			return i;
		}
	}
	i = smc_rmb_reserve_rtoken_idx(lgr);
	if (i < 0)
		return i;
	lgr->rtokens[i][lnk->link_idx].rkey = rkey;
	lgr->rtokens[i][lnk->link_idx].dma_addr = dma_addr;
	return i;
}

/* delete an rtoken from all links */
int smc_rtoken_delete(struct smc_link *lnk, __be32 nw_rkey)
{
	struct smc_link_group *lgr = smc_get_lgr(lnk);
	u32 rkey = ntohl(nw_rkey);
	int i, j;

	for (i = 0; i < SMC_RMBS_PER_LGR_MAX; i++) {
		if (lgr->rtokens[i][lnk->link_idx].rkey == rkey &&
		    test_bit(i, lgr->rtokens_used_mask)) {
			for (j = 0; j < SMC_LINKS_PER_LGR_MAX; j++) {
				lgr->rtokens[i][j].rkey = 0;
				lgr->rtokens[i][j].dma_addr = 0;
			}
			clear_bit(i, lgr->rtokens_used_mask);
			return 0;
		}
	}
	return -ENOENT;
}

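/* Layout of the rtoken bookkeeping used above (editorial sketch):
 * lgr->rtokens is a two-dimensional table indexed by RMB slot and link,
 * guarded by the rtokens_used_mask bitmap:
 *
 *	lgr->rtokens[i][lnk_idx]   i       : RMB slot, 0 .. SMC_RMBS_PER_LGR_MAX - 1
 *		.rkey              lnk_idx : link, 0 .. SMC_LINKS_PER_LGR_MAX - 1
 *		.dma_addr
 *
 * smc_rtoken_add() fills one column entry of a row for the link the token
 * was learned on, smc_rtoken_set()/smc_rtoken_set2() copy a row entry to
 * a newly added link, and smc_rtoken_delete() clears the whole row across
 * all links and releases the slot in the bitmap.
 */
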
/* save rkey and dma_addr received from peer during clc handshake */
int smc_rmb_rtoken_handling(struct smc_connection *conn,
			    struct smc_link *lnk,
			    struct smc_clc_msg_accept_confirm *clc)
{
	conn->rtoken_idx = smc_rtoken_add(lnk, clc->r0.rmb_dma_addr,
					  clc->r0.rmb_rkey);
	if (conn->rtoken_idx < 0)
		return conn->rtoken_idx;
	return 0;
}

static void smc_core_going_away(void)
{
	struct smc_ib_device *smcibdev;
	struct smcd_dev *smcd;

	mutex_lock(&smc_ib_devices.mutex);
	list_for_each_entry(smcibdev, &smc_ib_devices.list, list) {
		int i;

		for (i = 0; i < SMC_MAX_PORTS; i++)
			set_bit(i, smcibdev->ports_going_away);
	}
	mutex_unlock(&smc_ib_devices.mutex);

	mutex_lock(&smcd_dev_list.mutex);
	list_for_each_entry(smcd, &smcd_dev_list.list, list) {
		smcd->going_away = 1;
	}
	mutex_unlock(&smcd_dev_list.mutex);
}

/* Clean up all SMC link groups */
static void smc_lgrs_shutdown(void)
{
	struct smcd_dev *smcd;

	smc_core_going_away();

	smc_smcr_terminate_all(NULL);

	mutex_lock(&smcd_dev_list.mutex);
	list_for_each_entry(smcd, &smcd_dev_list.list, list)
		smc_smcd_terminate_all(smcd);
	mutex_unlock(&smcd_dev_list.mutex);
}

static int smc_core_reboot_event(struct notifier_block *this,
				 unsigned long event, void *ptr)
{
	smc_lgrs_shutdown();
	smc_ib_unregister_client();
	return 0;
}

static struct notifier_block smc_reboot_notifier = {
	.notifier_call = smc_core_reboot_event,
};

int __init smc_core_init(void)
{
	return register_reboot_notifier(&smc_reboot_notifier);
}

/* Called (from smc_exit) when module is removed */
void smc_core_exit(void)
{
	unregister_reboot_notifier(&smc_reboot_notifier);
	smc_lgrs_shutdown();
}