1 // SPDX-License-Identifier: GPL-2.0
3 * Shared Memory Communications over RDMA (SMC-R) and RoCE
5 * Link Layer Control (LLC)
7 * Copyright IBM Corp. 2016
9 * Author(s): Klaus Wacker <Klaus.Wacker@de.ibm.com>
10 * Ursula Braun <ubraun@linux.vnet.ibm.com>
14 #include <rdma/ib_verbs.h>
22 #define SMC_LLC_DATA_LEN 40
25 struct smc_wr_rx_hdr common;
27 #if defined(__BIG_ENDIAN_BITFIELD)
30 #elif defined(__LITTLE_ENDIAN_BITFIELD)
31 u8 add_link_rej_rsn:4,
37 #define SMC_LLC_FLAG_NO_RMBE_EYEC 0x03
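/* LLC messages are small, fixed-size control messages exchanged over the
 * RDMA work request buffers of a link; the message layouts below follow
 * the formats defined in RFC 7609.
 */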
39 struct smc_llc_msg_confirm_link { /* type 0x01 */
40 struct smc_llc_hdr hd;
41 u8 sender_mac[ETH_ALEN];
42 u8 sender_gid[SMC_GID_SIZE];
45 u8 link_uid[SMC_LGR_ID_SIZE];
50 #define SMC_LLC_FLAG_ADD_LNK_REJ 0x40
51 #define SMC_LLC_REJ_RSN_NO_ALT_PATH 1
53 #define SMC_LLC_ADD_LNK_MAX_LINKS 2
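/* maximum link count advertised to the peer, see smc_llc_send_confirm_link() */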
55 struct smc_llc_msg_add_link { /* type 0x02 */
56 struct smc_llc_hdr hd;
57 u8 sender_mac[ETH_ALEN];
59 u8 sender_gid[SMC_GID_SIZE];
62 #if defined(__BIG_ENDIAN_BITFIELD)
65 #elif defined(__LITTLE_ENDIAN_BITFIELD)
73 struct smc_llc_msg_add_link_cont_rt {
79 #define SMC_LLC_RKEYS_PER_CONT_MSG 2
81 struct smc_llc_msg_add_link_cont { /* type 0x03 */
82 struct smc_llc_hdr hd;
86 struct smc_llc_msg_add_link_cont_rt rt[SMC_LLC_RKEYS_PER_CONT_MSG];
88 } __packed; /* format defined in RFC7609 */
90 #define SMC_LLC_FLAG_DEL_LINK_ALL 0x40
91 #define SMC_LLC_FLAG_DEL_LINK_ORDERLY 0x20
93 struct smc_llc_msg_del_link { /* type 0x04 */
94 struct smc_llc_hdr hd;
98 } __packed; /* format defined in RFC7609 */
100 struct smc_llc_msg_test_link { /* type 0x07 */
101 struct smc_llc_hdr hd;
106 struct smc_rmb_rtoken {
108 u8 num_rkeys; /* first rtoken byte of CONFIRM LINK msg; */
109 /* it actually holds the number of rtokens, and the */
110 /* first rtoken is always for the current link */
111 u8 link_id; /* link id of the rtoken */
115 } __packed; /* format defined in RFC7609 */
117 #define SMC_LLC_RKEYS_PER_MSG 3
119 struct smc_llc_msg_confirm_rkey { /* type 0x06 */
120 struct smc_llc_hdr hd;
121 struct smc_rmb_rtoken rtoken[SMC_LLC_RKEYS_PER_MSG];
125 #define SMC_LLC_DEL_RKEY_MAX 8
126 #define SMC_LLC_FLAG_RKEY_RETRY 0x10
127 #define SMC_LLC_FLAG_RKEY_NEG 0x20
129 struct smc_llc_msg_delete_rkey { /* type 0x09 */
130 struct smc_llc_hdr hd;
139 struct smc_llc_msg_confirm_link confirm_link;
140 struct smc_llc_msg_add_link add_link;
141 struct smc_llc_msg_add_link_cont add_link_cont;
142 struct smc_llc_msg_del_link delete_link;
144 struct smc_llc_msg_confirm_rkey confirm_rkey;
145 struct smc_llc_msg_delete_rkey delete_rkey;
147 struct smc_llc_msg_test_link test_link;
149 struct smc_llc_hdr hdr;
150 u8 data[SMC_LLC_DATA_LEN];
154 #define SMC_LLC_FLAG_RESP 0x80
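/* Every LLC message is either a request or a response. Responses carry
 * SMC_LLC_FLAG_RESP and are handed directly to the local flow waiting for
 * them; requests are queued for the event worker (see smc_llc_enqueue()).
 */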
156 struct smc_llc_qentry {
157 struct list_head list;
158 struct smc_link *link;
159 union smc_llc_msg msg;
162 static void smc_llc_enqueue(struct smc_link *link, union smc_llc_msg *llc);
164 struct smc_llc_qentry *smc_llc_flow_qentry_clr(struct smc_llc_flow *flow)
166 struct smc_llc_qentry *qentry = flow->qentry;
172 void smc_llc_flow_qentry_del(struct smc_llc_flow *flow)
174 struct smc_llc_qentry *qentry;
177 qentry = flow->qentry;
183 static inline void smc_llc_flow_qentry_set(struct smc_llc_flow *flow,
184 struct smc_llc_qentry *qentry)
186 flow->qentry = qentry;
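/* An ADD_LINK or DELETE_LINK request arriving while a different flow is
 * active is saved as lgr->delayed_event and replayed when that flow stops;
 * other parallel or already-in-progress requests are dropped, with a
 * one-time warning when their type differs from the running flow.
 */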
189 static void smc_llc_flow_parallel(struct smc_link_group *lgr, u8 flow_type,
190 struct smc_llc_qentry *qentry)
192 u8 msg_type = qentry->msg.raw.hdr.common.type;
194 if ((msg_type == SMC_LLC_ADD_LINK || msg_type == SMC_LLC_DELETE_LINK) &&
195 flow_type != msg_type && !lgr->delayed_event) {
196 lgr->delayed_event = qentry;
199 /* drop parallel or already-in-progress llc requests */
200 if (flow_type != msg_type)
201 pr_warn_once("smc: SMC-R lg %*phN dropped parallel "
202 "LLC msg: msg %d flow %d role %d\n",
203 SMC_LGR_ID_SIZE, &lgr->id,
204 qentry->msg.raw.hdr.common.type,
205 flow_type, lgr->role);
209 /* try to start a new llc flow, initiated by an incoming llc msg */
210 static bool smc_llc_flow_start(struct smc_llc_flow *flow,
211 struct smc_llc_qentry *qentry)
213 struct smc_link_group *lgr = qentry->link->lgr;
215 spin_lock_bh(&lgr->llc_flow_lock);
217 /* a flow is already active */
218 smc_llc_flow_parallel(lgr, flow->type, qentry);
219 spin_unlock_bh(&lgr->llc_flow_lock);
222 switch (qentry->msg.raw.hdr.common.type) {
223 case SMC_LLC_ADD_LINK:
224 flow->type = SMC_LLC_FLOW_ADD_LINK;
226 case SMC_LLC_DELETE_LINK:
227 flow->type = SMC_LLC_FLOW_DEL_LINK;
229 case SMC_LLC_CONFIRM_RKEY:
230 case SMC_LLC_DELETE_RKEY:
231 flow->type = SMC_LLC_FLOW_RKEY;
234 flow->type = SMC_LLC_FLOW_NONE;
236 smc_llc_flow_qentry_set(flow, qentry);
237 spin_unlock_bh(&lgr->llc_flow_lock);
241 /* start a new local llc flow, wait till current flow finished */
242 int smc_llc_flow_initiate(struct smc_link_group *lgr,
243 enum smc_llc_flowtype type)
245 enum smc_llc_flowtype allowed_remote = SMC_LLC_FLOW_NONE;
248 /* all flows except confirm_rkey and delete_rkey are exclusive;
249 * confirm/delete rkey flows can run concurrently (local and remote)
251 if (type == SMC_LLC_FLOW_RKEY)
252 allowed_remote = SMC_LLC_FLOW_RKEY;
254 if (list_empty(&lgr->list))
256 spin_lock_bh(&lgr->llc_flow_lock);
257 if (lgr->llc_flow_lcl.type == SMC_LLC_FLOW_NONE &&
258 (lgr->llc_flow_rmt.type == SMC_LLC_FLOW_NONE ||
259 lgr->llc_flow_rmt.type == allowed_remote)) {
260 lgr->llc_flow_lcl.type = type;
261 spin_unlock_bh(&lgr->llc_flow_lock);
264 spin_unlock_bh(&lgr->llc_flow_lock);
265 rc = wait_event_timeout(lgr->llc_flow_waiter, (list_empty(&lgr->list) ||
266 (lgr->llc_flow_lcl.type == SMC_LLC_FLOW_NONE &&
267 (lgr->llc_flow_rmt.type == SMC_LLC_FLOW_NONE ||
268 lgr->llc_flow_rmt.type == allowed_remote))),
269 SMC_LLC_WAIT_TIME * 10);
275 /* finish the current llc flow */
276 void smc_llc_flow_stop(struct smc_link_group *lgr, struct smc_llc_flow *flow)
278 spin_lock_bh(&lgr->llc_flow_lock);
279 memset(flow, 0, sizeof(*flow));
280 flow->type = SMC_LLC_FLOW_NONE;
281 spin_unlock_bh(&lgr->llc_flow_lock);
282 if (!list_empty(&lgr->list) && lgr->delayed_event &&
283 flow == &lgr->llc_flow_lcl)
284 schedule_work(&lgr->llc_event_work);
286 wake_up(&lgr->llc_flow_waiter);
289 /* lnk is optional and used for early wakeup when the link goes down; this is
290 * useful when we wait for a response on that link after sending a request
292 struct smc_llc_qentry *smc_llc_wait(struct smc_link_group *lgr,
293 struct smc_link *lnk,
294 int time_out, u8 exp_msg)
296 struct smc_llc_flow *flow = &lgr->llc_flow_lcl;
299 wait_event_timeout(lgr->llc_msg_waiter,
301 (lnk && !smc_link_usable(lnk)) ||
302 list_empty(&lgr->list)),
305 (lnk && !smc_link_usable(lnk)) || list_empty(&lgr->list)) {
306 smc_llc_flow_qentry_del(flow);
309 rcv_msg = flow->qentry->msg.raw.hdr.common.type;
310 if (exp_msg && rcv_msg != exp_msg) {
311 if (exp_msg == SMC_LLC_ADD_LINK &&
312 rcv_msg == SMC_LLC_DELETE_LINK) {
313 /* flow_start will delay the unexpected msg */
314 smc_llc_flow_start(&lgr->llc_flow_lcl,
315 smc_llc_flow_qentry_clr(flow));
318 pr_warn_once("smc: SMC-R lg %*phN dropped unexpected LLC msg: "
319 "msg %d exp %d flow %d role %d flags %x\n",
320 SMC_LGR_ID_SIZE, &lgr->id, rcv_msg, exp_msg,
321 flow->type, lgr->role,
322 flow->qentry->msg.raw.hdr.flags);
323 smc_llc_flow_qentry_del(flow);
329 /********************************** send *************************************/
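/* All send helpers below follow the same pattern (sketch, error handling
 * omitted):
 *
 *	if (!smc_wr_tx_link_hold(link))
 *		return -ENOLINK;
 *	rc = smc_llc_add_pending_send(link, &wr_buf, &pend);
 *	if (!rc) {
 *		... fill the message in *wr_buf ...
 *		rc = smc_wr_tx_send(link, pend);
 *	}
 *	smc_wr_tx_link_put(link);
 */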
331 struct smc_llc_tx_pend {
334 /* handler for send/transmission completion of an LLC msg */
335 static void smc_llc_tx_handler(struct smc_wr_tx_pend_priv *pend,
336 struct smc_link *link,
337 enum ib_wc_status wc_status)
339 /* future work: handle wc_status error for recovery and failover */
343 * smc_llc_add_pending_send() - add LLC control message to pending WQE transmits
344 * @link: Pointer to SMC link used for sending LLC control message.
345 * @wr_buf: Out variable returning pointer to work request payload buffer.
346 * @pend: Out variable returning pointer to private pending WR tracking.
347 * It's the context the transmit complete handler will get.
349 * Reserves and pre-fills an entry for a pending work request send/tx.
350 * Used by mid-level smc_llc_send_msg() to prepare for later actual send/tx.
351 * Can sleep due to smc_get_ctrl_buf (if not in softirq context).
353 * Return: 0 on success, otherwise an error value.
355 static int smc_llc_add_pending_send(struct smc_link *link,
356 struct smc_wr_buf **wr_buf,
357 struct smc_wr_tx_pend_priv **pend)
361 rc = smc_wr_tx_get_free_slot(link, smc_llc_tx_handler, wr_buf, NULL,
366 sizeof(union smc_llc_msg) > SMC_WR_BUF_SIZE,
367 "must increase SMC_WR_BUF_SIZE to at least sizeof(struct smc_llc_msg)");
369 sizeof(union smc_llc_msg) != SMC_WR_TX_SIZE,
370 "must adapt SMC_WR_TX_SIZE to sizeof(struct smc_llc_msg); if not all smc_wr upper layer protocols use the same message size any more, must start to set link->wr_tx_sges[i].length on each individual smc_wr_tx_send()");
372 sizeof(struct smc_llc_tx_pend) > SMC_WR_TX_PEND_PRIV_SIZE,
373 "must increase SMC_WR_TX_PEND_PRIV_SIZE to at least sizeof(struct smc_llc_tx_pend)");
377 /* high-level API to send LLC confirm link */
378 int smc_llc_send_confirm_link(struct smc_link *link,
379 enum smc_llc_reqresp reqresp)
381 struct smc_llc_msg_confirm_link *confllc;
382 struct smc_wr_tx_pend_priv *pend;
383 struct smc_wr_buf *wr_buf;
386 if (!smc_wr_tx_link_hold(link))
388 rc = smc_llc_add_pending_send(link, &wr_buf, &pend);
391 confllc = (struct smc_llc_msg_confirm_link *)wr_buf;
392 memset(confllc, 0, sizeof(*confllc));
393 confllc->hd.common.type = SMC_LLC_CONFIRM_LINK;
394 confllc->hd.length = sizeof(struct smc_llc_msg_confirm_link);
395 confllc->hd.flags |= SMC_LLC_FLAG_NO_RMBE_EYEC;
396 if (reqresp == SMC_LLC_RESP)
397 confllc->hd.flags |= SMC_LLC_FLAG_RESP;
398 memcpy(confllc->sender_mac, link->smcibdev->mac[link->ibport - 1],
400 memcpy(confllc->sender_gid, link->gid, SMC_GID_SIZE);
401 hton24(confllc->sender_qp_num, link->roce_qp->qp_num);
402 confllc->link_num = link->link_id;
403 memcpy(confllc->link_uid, link->link_uid, SMC_LGR_ID_SIZE);
404 confllc->max_links = SMC_LLC_ADD_LNK_MAX_LINKS;
405 /* send llc message */
406 rc = smc_wr_tx_send(link, pend);
408 smc_wr_tx_link_put(link);
412 /* send LLC confirm rkey request */
413 static int smc_llc_send_confirm_rkey(struct smc_link *send_link,
414 struct smc_buf_desc *rmb_desc)
416 struct smc_llc_msg_confirm_rkey *rkeyllc;
417 struct smc_wr_tx_pend_priv *pend;
418 struct smc_wr_buf *wr_buf;
419 struct smc_link *link;
422 if (!smc_wr_tx_link_hold(send_link))
424 rc = smc_llc_add_pending_send(send_link, &wr_buf, &pend);
427 rkeyllc = (struct smc_llc_msg_confirm_rkey *)wr_buf;
428 memset(rkeyllc, 0, sizeof(*rkeyllc));
429 rkeyllc->hd.common.type = SMC_LLC_CONFIRM_RKEY;
430 rkeyllc->hd.length = sizeof(struct smc_llc_msg_confirm_rkey);
433 for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
434 link = &send_link->lgr->lnk[i];
435 if (smc_link_active(link) && link != send_link) {
436 rkeyllc->rtoken[rtok_ix].link_id = link->link_id;
437 rkeyllc->rtoken[rtok_ix].rmb_key =
438 htonl(rmb_desc->mr_rx[link->link_idx]->rkey);
439 rkeyllc->rtoken[rtok_ix].rmb_vaddr = cpu_to_be64(
441 rmb_desc->sgt[link->link_idx].sgl));
445 /* rkey of send_link is in rtoken[0] */
446 rkeyllc->rtoken[0].num_rkeys = rtok_ix - 1;
447 rkeyllc->rtoken[0].rmb_key =
448 htonl(rmb_desc->mr_rx[send_link->link_idx]->rkey);
449 rkeyllc->rtoken[0].rmb_vaddr = cpu_to_be64(
450 (u64)sg_dma_address(rmb_desc->sgt[send_link->link_idx].sgl));
451 /* send llc message */
452 rc = smc_wr_tx_send(send_link, pend);
454 smc_wr_tx_link_put(send_link);
458 /* send LLC delete rkey request */
459 static int smc_llc_send_delete_rkey(struct smc_link *link,
460 struct smc_buf_desc *rmb_desc)
462 struct smc_llc_msg_delete_rkey *rkeyllc;
463 struct smc_wr_tx_pend_priv *pend;
464 struct smc_wr_buf *wr_buf;
467 if (!smc_wr_tx_link_hold(link))
469 rc = smc_llc_add_pending_send(link, &wr_buf, &pend);
472 rkeyllc = (struct smc_llc_msg_delete_rkey *)wr_buf;
473 memset(rkeyllc, 0, sizeof(*rkeyllc));
474 rkeyllc->hd.common.type = SMC_LLC_DELETE_RKEY;
475 rkeyllc->hd.length = sizeof(struct smc_llc_msg_delete_rkey);
476 rkeyllc->num_rkeys = 1;
477 rkeyllc->rkey[0] = htonl(rmb_desc->mr_rx[link->link_idx]->rkey);
478 /* send llc message */
479 rc = smc_wr_tx_send(link, pend);
481 smc_wr_tx_link_put(link);
485 /* send ADD LINK request or response */
486 int smc_llc_send_add_link(struct smc_link *link, u8 mac[], u8 gid[],
487 struct smc_link *link_new,
488 enum smc_llc_reqresp reqresp)
490 struct smc_llc_msg_add_link *addllc;
491 struct smc_wr_tx_pend_priv *pend;
492 struct smc_wr_buf *wr_buf;
495 if (!smc_wr_tx_link_hold(link))
497 rc = smc_llc_add_pending_send(link, &wr_buf, &pend);
500 addllc = (struct smc_llc_msg_add_link *)wr_buf;
502 memset(addllc, 0, sizeof(*addllc));
503 addllc->hd.common.type = SMC_LLC_ADD_LINK;
504 addllc->hd.length = sizeof(struct smc_llc_msg_add_link);
505 if (reqresp == SMC_LLC_RESP)
506 addllc->hd.flags |= SMC_LLC_FLAG_RESP;
507 memcpy(addllc->sender_mac, mac, ETH_ALEN);
508 memcpy(addllc->sender_gid, gid, SMC_GID_SIZE);
510 addllc->link_num = link_new->link_id;
511 hton24(addllc->sender_qp_num, link_new->roce_qp->qp_num);
512 hton24(addllc->initial_psn, link_new->psn_initial);
513 if (reqresp == SMC_LLC_REQ)
514 addllc->qp_mtu = link_new->path_mtu;
516 addllc->qp_mtu = min(link_new->path_mtu,
519 /* send llc message */
520 rc = smc_wr_tx_send(link, pend);
522 smc_wr_tx_link_put(link);
526 /* send DELETE LINK request or response */
527 int smc_llc_send_delete_link(struct smc_link *link, u8 link_del_id,
528 enum smc_llc_reqresp reqresp, bool orderly,
531 struct smc_llc_msg_del_link *delllc;
532 struct smc_wr_tx_pend_priv *pend;
533 struct smc_wr_buf *wr_buf;
536 if (!smc_wr_tx_link_hold(link))
538 rc = smc_llc_add_pending_send(link, &wr_buf, &pend);
541 delllc = (struct smc_llc_msg_del_link *)wr_buf;
543 memset(delllc, 0, sizeof(*delllc));
544 delllc->hd.common.type = SMC_LLC_DELETE_LINK;
545 delllc->hd.length = sizeof(struct smc_llc_msg_del_link);
546 if (reqresp == SMC_LLC_RESP)
547 delllc->hd.flags |= SMC_LLC_FLAG_RESP;
549 delllc->hd.flags |= SMC_LLC_FLAG_DEL_LINK_ORDERLY;
551 delllc->link_num = link_del_id;
553 delllc->hd.flags |= SMC_LLC_FLAG_DEL_LINK_ALL;
554 delllc->reason = htonl(reason);
555 /* send llc message */
556 rc = smc_wr_tx_send(link, pend);
558 smc_wr_tx_link_put(link);
562 /* send LLC test link request */
563 static int smc_llc_send_test_link(struct smc_link *link, u8 user_data[16])
565 struct smc_llc_msg_test_link *testllc;
566 struct smc_wr_tx_pend_priv *pend;
567 struct smc_wr_buf *wr_buf;
570 if (!smc_wr_tx_link_hold(link))
572 rc = smc_llc_add_pending_send(link, &wr_buf, &pend);
575 testllc = (struct smc_llc_msg_test_link *)wr_buf;
576 memset(testllc, 0, sizeof(*testllc));
577 testllc->hd.common.type = SMC_LLC_TEST_LINK;
578 testllc->hd.length = sizeof(struct smc_llc_msg_test_link);
579 memcpy(testllc->user_data, user_data, sizeof(testllc->user_data));
580 /* send llc message */
581 rc = smc_wr_tx_send(link, pend);
583 smc_wr_tx_link_put(link);
587 /* schedule an llc send on link, may wait for buffers */
588 static int smc_llc_send_message(struct smc_link *link, void *llcbuf)
590 struct smc_wr_tx_pend_priv *pend;
591 struct smc_wr_buf *wr_buf;
594 if (!smc_wr_tx_link_hold(link))
596 rc = smc_llc_add_pending_send(link, &wr_buf, &pend);
599 memcpy(wr_buf, llcbuf, sizeof(union smc_llc_msg));
600 rc = smc_wr_tx_send(link, pend);
602 smc_wr_tx_link_put(link);
606 /* schedule an llc send on link, may wait for buffers,
607 * and wait for send completion notification.
608 * @return 0 on success
610 static int smc_llc_send_message_wait(struct smc_link *link, void *llcbuf)
612 struct smc_wr_tx_pend_priv *pend;
613 struct smc_wr_buf *wr_buf;
616 if (!smc_wr_tx_link_hold(link))
618 rc = smc_llc_add_pending_send(link, &wr_buf, &pend);
621 memcpy(wr_buf, llcbuf, sizeof(union smc_llc_msg));
622 rc = smc_wr_tx_send_wait(link, pend, SMC_LLC_WAIT_TIME);
624 smc_wr_tx_link_put(link);
628 /********************************* receive ***********************************/
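/* Received LLC messages are copied into a queue entry by the rx handler.
 * Responses are dispatched immediately in tasklet context to the local
 * flow that requested them; requests are appended to lgr->llc_event_q and
 * processed by the llc_event_work worker in process context.
 */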
630 static int smc_llc_alloc_alt_link(struct smc_link_group *lgr,
631 enum smc_lgr_type lgr_new_t)
635 if (lgr->type == SMC_LGR_SYMMETRIC ||
636 (lgr->type != SMC_LGR_SINGLE &&
637 (lgr_new_t == SMC_LGR_ASYMMETRIC_LOCAL ||
638 lgr_new_t == SMC_LGR_ASYMMETRIC_PEER)))
641 if (lgr_new_t == SMC_LGR_ASYMMETRIC_LOCAL ||
642 lgr_new_t == SMC_LGR_ASYMMETRIC_PEER) {
643 for (i = SMC_LINKS_PER_LGR_MAX - 1; i >= 0; i--)
644 if (lgr->lnk[i].state == SMC_LNK_UNUSED)
647 for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++)
648 if (lgr->lnk[i].state == SMC_LNK_UNUSED)
654 /* return first buffer from any of the next buf lists */
655 static struct smc_buf_desc *_smc_llc_get_next_rmb(struct smc_link_group *lgr,
658 struct smc_buf_desc *buf_pos;
660 while (*buf_lst < SMC_RMBE_SIZES) {
661 buf_pos = list_first_entry_or_null(&lgr->rmbs[*buf_lst],
662 struct smc_buf_desc, list);
670 /* return next rmb from buffer lists */
671 static struct smc_buf_desc *smc_llc_get_next_rmb(struct smc_link_group *lgr,
673 struct smc_buf_desc *buf_pos)
675 struct smc_buf_desc *buf_next;
677 if (!buf_pos || list_is_last(&buf_pos->list, &lgr->rmbs[*buf_lst])) {
679 return _smc_llc_get_next_rmb(lgr, buf_lst);
681 buf_next = list_next_entry(buf_pos, list);
685 static struct smc_buf_desc *smc_llc_get_first_rmb(struct smc_link_group *lgr,
689 return smc_llc_get_next_rmb(lgr, buf_lst, NULL);
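/* The rkey exchange for a new link walks all RMBs of the link group and
 * transfers their rkeys in ADD_LINK_CONT messages, at most
 * SMC_LLC_RKEYS_PER_CONT_MSG rtokens per message, until both sides have
 * nothing left to send.
 */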
692 /* send one add_link_continue msg */
693 static int smc_llc_add_link_cont(struct smc_link *link,
694 struct smc_link *link_new, u8 *num_rkeys_todo,
695 int *buf_lst, struct smc_buf_desc **buf_pos)
697 struct smc_llc_msg_add_link_cont *addc_llc;
698 struct smc_link_group *lgr = link->lgr;
699 int prim_lnk_idx, lnk_idx, i, rc;
700 struct smc_wr_tx_pend_priv *pend;
701 struct smc_wr_buf *wr_buf;
702 struct smc_buf_desc *rmb;
705 if (!smc_wr_tx_link_hold(link))
707 rc = smc_llc_add_pending_send(link, &wr_buf, &pend);
710 addc_llc = (struct smc_llc_msg_add_link_cont *)wr_buf;
711 memset(addc_llc, 0, sizeof(*addc_llc));
713 prim_lnk_idx = link->link_idx;
714 lnk_idx = link_new->link_idx;
715 addc_llc->link_num = link_new->link_id;
716 addc_llc->num_rkeys = *num_rkeys_todo;
718 for (i = 0; i < min_t(u8, n, SMC_LLC_RKEYS_PER_CONT_MSG); i++) {
720 addc_llc->num_rkeys = addc_llc->num_rkeys -
727 addc_llc->rt[i].rmb_key = htonl(rmb->mr_rx[prim_lnk_idx]->rkey);
728 addc_llc->rt[i].rmb_key_new = htonl(rmb->mr_rx[lnk_idx]->rkey);
729 addc_llc->rt[i].rmb_vaddr_new =
730 cpu_to_be64((u64)sg_dma_address(rmb->sgt[lnk_idx].sgl));
733 *buf_pos = smc_llc_get_next_rmb(lgr, buf_lst, *buf_pos);
734 while (*buf_pos && !(*buf_pos)->used)
735 *buf_pos = smc_llc_get_next_rmb(lgr, buf_lst, *buf_pos);
737 addc_llc->hd.common.type = SMC_LLC_ADD_LINK_CONT;
738 addc_llc->hd.length = sizeof(struct smc_llc_msg_add_link_cont);
739 if (lgr->role == SMC_CLNT)
740 addc_llc->hd.flags |= SMC_LLC_FLAG_RESP;
741 rc = smc_wr_tx_send(link, pend);
743 smc_wr_tx_link_put(link);
747 static int smc_llc_cli_rkey_exchange(struct smc_link *link,
748 struct smc_link *link_new)
750 struct smc_llc_msg_add_link_cont *addc_llc;
751 struct smc_link_group *lgr = link->lgr;
752 u8 max, num_rkeys_send, num_rkeys_recv;
753 struct smc_llc_qentry *qentry;
754 struct smc_buf_desc *buf_pos;
759 mutex_lock(&lgr->rmbs_lock);
760 num_rkeys_send = lgr->conns_num;
761 buf_pos = smc_llc_get_first_rmb(lgr, &buf_lst);
763 qentry = smc_llc_wait(lgr, NULL, SMC_LLC_WAIT_TIME,
764 SMC_LLC_ADD_LINK_CONT);
769 addc_llc = &qentry->msg.add_link_cont;
770 num_rkeys_recv = addc_llc->num_rkeys;
771 max = min_t(u8, num_rkeys_recv, SMC_LLC_RKEYS_PER_CONT_MSG);
772 for (i = 0; i < max; i++) {
773 smc_rtoken_set(lgr, link->link_idx, link_new->link_idx,
774 addc_llc->rt[i].rmb_key,
775 addc_llc->rt[i].rmb_vaddr_new,
776 addc_llc->rt[i].rmb_key_new);
779 smc_llc_flow_qentry_del(&lgr->llc_flow_lcl);
780 rc = smc_llc_add_link_cont(link, link_new, &num_rkeys_send,
784 } while (num_rkeys_send || num_rkeys_recv);
786 mutex_unlock(&lgr->rmbs_lock);
790 /* prepare and send an add link reject response */
791 static int smc_llc_cli_add_link_reject(struct smc_llc_qentry *qentry)
793 qentry->msg.raw.hdr.flags |= SMC_LLC_FLAG_RESP;
794 qentry->msg.raw.hdr.flags |= SMC_LLC_FLAG_ADD_LNK_REJ;
795 qentry->msg.raw.hdr.add_link_rej_rsn = SMC_LLC_REJ_RSN_NO_ALT_PATH;
796 return smc_llc_send_message(qentry->link, &qentry->msg);
799 static int smc_llc_cli_conf_link(struct smc_link *link,
800 struct smc_init_info *ini,
801 struct smc_link *link_new,
802 enum smc_lgr_type lgr_new_t)
804 struct smc_link_group *lgr = link->lgr;
805 struct smc_llc_qentry *qentry = NULL;
808 /* receive CONFIRM LINK request over RoCE fabric */
809 qentry = smc_llc_wait(lgr, NULL, SMC_LLC_WAIT_FIRST_TIME, 0);
811 rc = smc_llc_send_delete_link(link, link_new->link_id,
813 SMC_LLC_DEL_LOST_PATH);
816 if (qentry->msg.raw.hdr.common.type != SMC_LLC_CONFIRM_LINK) {
817 /* received DELETE_LINK instead */
818 qentry->msg.raw.hdr.flags |= SMC_LLC_FLAG_RESP;
819 smc_llc_send_message(link, &qentry->msg);
820 smc_llc_flow_qentry_del(&lgr->llc_flow_lcl);
823 smc_llc_save_peer_uid(qentry);
824 smc_llc_flow_qentry_del(&lgr->llc_flow_lcl);
826 rc = smc_ib_modify_qp_rts(link_new);
828 smc_llc_send_delete_link(link, link_new->link_id, SMC_LLC_REQ,
829 false, SMC_LLC_DEL_LOST_PATH);
832 smc_wr_remember_qp_attr(link_new);
834 rc = smcr_buf_reg_lgr(link_new);
836 smc_llc_send_delete_link(link, link_new->link_id, SMC_LLC_REQ,
837 false, SMC_LLC_DEL_LOST_PATH);
841 /* send CONFIRM LINK response over RoCE fabric */
842 rc = smc_llc_send_confirm_link(link_new, SMC_LLC_RESP);
844 smc_llc_send_delete_link(link, link_new->link_id, SMC_LLC_REQ,
845 false, SMC_LLC_DEL_LOST_PATH);
848 smc_llc_link_active(link_new);
849 if (lgr_new_t == SMC_LGR_ASYMMETRIC_LOCAL ||
850 lgr_new_t == SMC_LGR_ASYMMETRIC_PEER)
851 smcr_lgr_set_type_asym(lgr, lgr_new_t, link_new->link_idx);
853 smcr_lgr_set_type(lgr, lgr_new_t);
857 static void smc_llc_save_add_link_info(struct smc_link *link,
858 struct smc_llc_msg_add_link *add_llc)
860 link->peer_qpn = ntoh24(add_llc->sender_qp_num);
861 memcpy(link->peer_gid, add_llc->sender_gid, SMC_GID_SIZE);
862 memcpy(link->peer_mac, add_llc->sender_mac, ETH_ALEN);
863 link->peer_psn = ntoh24(add_llc->initial_psn);
864 link->peer_mtu = add_llc->qp_mtu;
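/* Client-side ADD_LINK processing: pick an alternate RoCE device if one is
 * available, initialize the new link with the peer data from the request,
 * answer with an ADD_LINK response, exchange rkeys via ADD_LINK_CONT, and
 * complete the CONFIRM_LINK handshake initiated by the server.
 */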
867 /* as an SMC client, process an add link request */
868 int smc_llc_cli_add_link(struct smc_link *link, struct smc_llc_qentry *qentry)
870 struct smc_llc_msg_add_link *llc = &qentry->msg.add_link;
871 enum smc_lgr_type lgr_new_t = SMC_LGR_SYMMETRIC;
872 struct smc_link_group *lgr = smc_get_lgr(link);
873 struct smc_link *lnk_new = NULL;
874 struct smc_init_info ini;
880 ini.vlan_id = lgr->vlan_id;
881 smc_pnet_find_alt_roce(lgr, &ini, link->smcibdev);
882 if (!memcmp(llc->sender_gid, link->peer_gid, SMC_GID_SIZE) &&
883 !memcmp(llc->sender_mac, link->peer_mac, ETH_ALEN)) {
886 lgr_new_t = SMC_LGR_ASYMMETRIC_PEER;
889 lgr_new_t = SMC_LGR_ASYMMETRIC_LOCAL;
890 ini.ib_dev = link->smcibdev;
891 ini.ib_port = link->ibport;
893 lnk_idx = smc_llc_alloc_alt_link(lgr, lgr_new_t);
896 lnk_new = &lgr->lnk[lnk_idx];
897 rc = smcr_link_init(lgr, lnk_new, lnk_idx, &ini);
900 smc_llc_save_add_link_info(lnk_new, llc);
901 lnk_new->link_id = llc->link_num; /* SMC server assigns link id */
902 smc_llc_link_set_uid(lnk_new);
904 rc = smc_ib_ready_link(lnk_new);
908 rc = smcr_buf_map_lgr(lnk_new);
912 rc = smc_llc_send_add_link(link,
913 lnk_new->smcibdev->mac[ini.ib_port - 1],
914 lnk_new->gid, lnk_new, SMC_LLC_RESP);
917 rc = smc_llc_cli_rkey_exchange(link, lnk_new);
922 rc = smc_llc_cli_conf_link(link, &ini, lnk_new, lgr_new_t);
926 lnk_new->state = SMC_LNK_INACTIVE;
927 smcr_link_clear(lnk_new, false);
929 smc_llc_cli_add_link_reject(qentry);
935 /* as an SMC client, invite server to start the add_link processing */
936 static void smc_llc_cli_add_link_invite(struct smc_link *link,
937 struct smc_llc_qentry *qentry)
939 struct smc_link_group *lgr = smc_get_lgr(link);
940 struct smc_init_info ini;
942 if (lgr->type == SMC_LGR_SYMMETRIC ||
943 lgr->type == SMC_LGR_ASYMMETRIC_PEER)
946 ini.vlan_id = lgr->vlan_id;
947 smc_pnet_find_alt_roce(lgr, &ini, link->smcibdev);
951 smc_llc_send_add_link(link, ini.ib_dev->mac[ini.ib_port - 1],
952 ini.ib_gid, NULL, SMC_LLC_REQ);
957 static bool smc_llc_is_empty_llc_message(union smc_llc_msg *llc)
961 for (i = 0; i < ARRAY_SIZE(llc->raw.data); i++)
962 if (llc->raw.data[i])
967 static bool smc_llc_is_local_add_link(union smc_llc_msg *llc)
969 if (llc->raw.hdr.common.type == SMC_LLC_ADD_LINK &&
970 smc_llc_is_empty_llc_message(llc))
975 static void smc_llc_process_cli_add_link(struct smc_link_group *lgr)
977 struct smc_llc_qentry *qentry;
979 qentry = smc_llc_flow_qentry_clr(&lgr->llc_flow_lcl);
981 mutex_lock(&lgr->llc_conf_mutex);
982 if (smc_llc_is_local_add_link(&qentry->msg))
983 smc_llc_cli_add_link_invite(qentry->link, qentry);
985 smc_llc_cli_add_link(qentry->link, qentry);
986 mutex_unlock(&lgr->llc_conf_mutex);
989 static int smc_llc_active_link_count(struct smc_link_group *lgr)
991 int i, link_count = 0;
993 for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
994 if (!smc_link_active(&lgr->lnk[i]))
1001 /* find the asymmetric link when 3 links are established */
1002 static struct smc_link *smc_llc_find_asym_link(struct smc_link_group *lgr)
1004 int asym_idx = -ENOENT;
1008 /* determine asymmetric link */
1010 for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
1011 for (j = i + 1; j < SMC_LINKS_PER_LGR_MAX; j++) {
1012 if (!smc_link_usable(&lgr->lnk[i]) ||
1013 !smc_link_usable(&lgr->lnk[j]))
1015 if (!memcmp(lgr->lnk[i].gid, lgr->lnk[j].gid,
1017 found = true; /* asym_lnk is i or j */
1025 goto out; /* no asymmetric link */
1026 for (k = 0; k < SMC_LINKS_PER_LGR_MAX; k++) {
1027 if (!smc_link_usable(&lgr->lnk[k]))
1030 !memcmp(lgr->lnk[i].peer_gid, lgr->lnk[k].peer_gid,
1036 !memcmp(lgr->lnk[j].peer_gid, lgr->lnk[k].peer_gid,
1043 return (asym_idx < 0) ? NULL : &lgr->lnk[asym_idx];
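/* remove the asymmetric link of the link group: move its connections to
 * another link, ask the peer to delete it, and clear it locally
 */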
1046 static void smc_llc_delete_asym_link(struct smc_link_group *lgr)
1048 struct smc_link *lnk_new = NULL, *lnk_asym;
1049 struct smc_llc_qentry *qentry;
1052 lnk_asym = smc_llc_find_asym_link(lgr);
1054 return; /* no asymmetric link */
1055 if (!smc_link_downing(&lnk_asym->state))
1057 lnk_new = smc_switch_conns(lgr, lnk_asym, false);
1058 smc_wr_tx_wait_no_pending_sends(lnk_asym);
1061 /* change flow type from ADD_LINK into DEL_LINK */
1062 lgr->llc_flow_lcl.type = SMC_LLC_FLOW_DEL_LINK;
1063 rc = smc_llc_send_delete_link(lnk_new, lnk_asym->link_id, SMC_LLC_REQ,
1064 true, SMC_LLC_DEL_NO_ASYM_NEEDED);
1066 smcr_link_down_cond(lnk_new);
1069 qentry = smc_llc_wait(lgr, lnk_new, SMC_LLC_WAIT_TIME,
1070 SMC_LLC_DELETE_LINK);
1072 smcr_link_down_cond(lnk_new);
1075 smc_llc_flow_qentry_del(&lgr->llc_flow_lcl);
1077 smcr_link_clear(lnk_asym, true);
1080 static int smc_llc_srv_rkey_exchange(struct smc_link *link,
1081 struct smc_link *link_new)
1083 struct smc_llc_msg_add_link_cont *addc_llc;
1084 struct smc_link_group *lgr = link->lgr;
1085 u8 max, num_rkeys_send, num_rkeys_recv;
1086 struct smc_llc_qentry *qentry = NULL;
1087 struct smc_buf_desc *buf_pos;
1092 mutex_lock(&lgr->rmbs_lock);
1093 num_rkeys_send = lgr->conns_num;
1094 buf_pos = smc_llc_get_first_rmb(lgr, &buf_lst);
1096 smc_llc_add_link_cont(link, link_new, &num_rkeys_send,
1097 &buf_lst, &buf_pos);
1098 qentry = smc_llc_wait(lgr, link, SMC_LLC_WAIT_TIME,
1099 SMC_LLC_ADD_LINK_CONT);
1104 addc_llc = &qentry->msg.add_link_cont;
1105 num_rkeys_recv = addc_llc->num_rkeys;
1106 max = min_t(u8, num_rkeys_recv, SMC_LLC_RKEYS_PER_CONT_MSG);
1107 for (i = 0; i < max; i++) {
1108 smc_rtoken_set(lgr, link->link_idx, link_new->link_idx,
1109 addc_llc->rt[i].rmb_key,
1110 addc_llc->rt[i].rmb_vaddr_new,
1111 addc_llc->rt[i].rmb_key_new);
1114 smc_llc_flow_qentry_del(&lgr->llc_flow_lcl);
1115 } while (num_rkeys_send || num_rkeys_recv);
1117 mutex_unlock(&lgr->rmbs_lock);
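/* As SMC server, confirm the new link: send a CONFIRM_LINK request on it,
 * wait for the client's response, then activate the link and update the
 * link group type.
 */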
1121 static int smc_llc_srv_conf_link(struct smc_link *link,
1122 struct smc_link *link_new,
1123 enum smc_lgr_type lgr_new_t)
1125 struct smc_link_group *lgr = link->lgr;
1126 struct smc_llc_qentry *qentry = NULL;
1129 /* send CONFIRM LINK request over the RoCE fabric */
1130 rc = smc_llc_send_confirm_link(link_new, SMC_LLC_REQ);
1133 /* receive CONFIRM LINK response over the RoCE fabric */
1134 qentry = smc_llc_wait(lgr, link, SMC_LLC_WAIT_FIRST_TIME, 0);
1136 qentry->msg.raw.hdr.common.type != SMC_LLC_CONFIRM_LINK) {
1137 /* send DELETE LINK */
1138 smc_llc_send_delete_link(link, link_new->link_id, SMC_LLC_REQ,
1139 false, SMC_LLC_DEL_LOST_PATH);
1141 smc_llc_flow_qentry_del(&lgr->llc_flow_lcl);
1144 smc_llc_save_peer_uid(qentry);
1145 smc_llc_link_active(link_new);
1146 if (lgr_new_t == SMC_LGR_ASYMMETRIC_LOCAL ||
1147 lgr_new_t == SMC_LGR_ASYMMETRIC_PEER)
1148 smcr_lgr_set_type_asym(lgr, lgr_new_t, link_new->link_idx);
1150 smcr_lgr_set_type(lgr, lgr_new_t);
1151 smc_llc_flow_qentry_del(&lgr->llc_flow_lcl);
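/* As SMC server, set up an additional link: initialize it on an alternate
 * (or, if none is found, the same) RoCE device, send an ADD_LINK request,
 * evaluate the client's response, exchange rkeys and confirm the link.
 */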
1155 int smc_llc_srv_add_link(struct smc_link *link)
1157 enum smc_lgr_type lgr_new_t = SMC_LGR_SYMMETRIC;
1158 struct smc_link_group *lgr = link->lgr;
1159 struct smc_llc_msg_add_link *add_llc;
1160 struct smc_llc_qentry *qentry = NULL;
1161 struct smc_link *link_new;
1162 struct smc_init_info ini;
1163 int lnk_idx, rc = 0;
1165 /* ignore client add link recommendation, start new flow */
1166 ini.vlan_id = lgr->vlan_id;
1167 smc_pnet_find_alt_roce(lgr, &ini, link->smcibdev);
1169 lgr_new_t = SMC_LGR_ASYMMETRIC_LOCAL;
1170 ini.ib_dev = link->smcibdev;
1171 ini.ib_port = link->ibport;
1173 lnk_idx = smc_llc_alloc_alt_link(lgr, lgr_new_t);
1177 rc = smcr_link_init(lgr, &lgr->lnk[lnk_idx], lnk_idx, &ini);
1180 link_new = &lgr->lnk[lnk_idx];
1181 rc = smc_llc_send_add_link(link,
1182 link_new->smcibdev->mac[ini.ib_port - 1],
1183 link_new->gid, link_new, SMC_LLC_REQ);
1186 /* receive ADD LINK response over the RoCE fabric */
1187 qentry = smc_llc_wait(lgr, link, SMC_LLC_WAIT_TIME, SMC_LLC_ADD_LINK);
1192 add_llc = &qentry->msg.add_link;
1193 if (add_llc->hd.flags & SMC_LLC_FLAG_ADD_LNK_REJ) {
1194 smc_llc_flow_qentry_del(&lgr->llc_flow_lcl);
1198 if (lgr->type == SMC_LGR_SINGLE &&
1199 (!memcmp(add_llc->sender_gid, link->peer_gid, SMC_GID_SIZE) &&
1200 !memcmp(add_llc->sender_mac, link->peer_mac, ETH_ALEN))) {
1201 lgr_new_t = SMC_LGR_ASYMMETRIC_PEER;
1203 smc_llc_save_add_link_info(link_new, add_llc);
1204 smc_llc_flow_qentry_del(&lgr->llc_flow_lcl);
1206 rc = smc_ib_ready_link(link_new);
1209 rc = smcr_buf_map_lgr(link_new);
1212 rc = smcr_buf_reg_lgr(link_new);
1215 rc = smc_llc_srv_rkey_exchange(link, link_new);
1218 rc = smc_llc_srv_conf_link(link, link_new, lgr_new_t);
1223 link_new->state = SMC_LNK_INACTIVE;
1224 smcr_link_clear(link_new, false);
1228 static void smc_llc_process_srv_add_link(struct smc_link_group *lgr)
1230 struct smc_link *link = lgr->llc_flow_lcl.qentry->link;
1233 smc_llc_flow_qentry_del(&lgr->llc_flow_lcl);
1235 mutex_lock(&lgr->llc_conf_mutex);
1236 rc = smc_llc_srv_add_link(link);
1237 if (!rc && lgr->type == SMC_LGR_SYMMETRIC) {
1238 /* delete any asymmetric link */
1239 smc_llc_delete_asym_link(lgr);
1241 mutex_unlock(&lgr->llc_conf_mutex);
1244 /* enqueue a local add_link req to trigger a new add_link flow */
1245 void smc_llc_add_link_local(struct smc_link *link)
1247 struct smc_llc_msg_add_link add_llc = {};
1249 add_llc.hd.length = sizeof(add_llc);
1250 add_llc.hd.common.type = SMC_LLC_ADD_LINK;
1251 /* no dev and port needed */
1252 smc_llc_enqueue(link, (union smc_llc_msg *)&add_llc);
1255 /* worker to process an add link message */
1256 static void smc_llc_add_link_work(struct work_struct *work)
1258 struct smc_link_group *lgr = container_of(work, struct smc_link_group,
1261 if (list_empty(&lgr->list)) {
1262 /* link group is terminating */
1263 smc_llc_flow_qentry_del(&lgr->llc_flow_lcl);
1267 if (lgr->role == SMC_CLNT)
1268 smc_llc_process_cli_add_link(lgr);
1270 smc_llc_process_srv_add_link(lgr);
1272 smc_llc_flow_stop(lgr, &lgr->llc_flow_lcl);
1275 /* enqueue a local del_link msg to trigger a new del_link flow,
1276 * called only for role SMC_SERV
1278 void smc_llc_srv_delete_link_local(struct smc_link *link, u8 del_link_id)
1280 struct smc_llc_msg_del_link del_llc = {};
1282 del_llc.hd.length = sizeof(del_llc);
1283 del_llc.hd.common.type = SMC_LLC_DELETE_LINK;
1284 del_llc.link_num = del_link_id;
1285 del_llc.reason = htonl(SMC_LLC_DEL_LOST_PATH);
1286 del_llc.hd.flags |= SMC_LLC_FLAG_DEL_LINK_ORDERLY;
1287 smc_llc_enqueue(link, (union smc_llc_msg *)&del_llc);
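/* As SMC client, process a DELETE_LINK request: terminate the whole link
 * group if all links are to be deleted, otherwise send the response, move
 * the connections off the affected link, clear it, and adjust the link
 * group type.
 */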
1290 static void smc_llc_process_cli_delete_link(struct smc_link_group *lgr)
1292 struct smc_link *lnk_del = NULL, *lnk_asym, *lnk;
1293 struct smc_llc_msg_del_link *del_llc;
1294 struct smc_llc_qentry *qentry;
1298 qentry = smc_llc_flow_qentry_clr(&lgr->llc_flow_lcl);
1300 del_llc = &qentry->msg.delete_link;
1302 if (del_llc->hd.flags & SMC_LLC_FLAG_DEL_LINK_ALL) {
1303 smc_lgr_terminate_sched(lgr);
1306 mutex_lock(&lgr->llc_conf_mutex);
1307 /* delete single link */
1308 for (lnk_idx = 0; lnk_idx < SMC_LINKS_PER_LGR_MAX; lnk_idx++) {
1309 if (lgr->lnk[lnk_idx].link_id != del_llc->link_num)
1311 lnk_del = &lgr->lnk[lnk_idx];
1314 del_llc->hd.flags |= SMC_LLC_FLAG_RESP;
1316 /* link was not found */
1317 del_llc->reason = htonl(SMC_LLC_DEL_NOLNK);
1318 smc_llc_send_message(lnk, &qentry->msg);
1321 lnk_asym = smc_llc_find_asym_link(lgr);
1323 del_llc->reason = 0;
1324 smc_llc_send_message(lnk, &qentry->msg); /* response */
1326 if (smc_link_downing(&lnk_del->state))
1327 smc_switch_conns(lgr, lnk_del, false);
1328 smcr_link_clear(lnk_del, true);
1330 active_links = smc_llc_active_link_count(lgr);
1331 if (lnk_del == lnk_asym) {
1332 /* expected deletion of asym link, don't change lgr state */
1333 } else if (active_links == 1) {
1334 smcr_lgr_set_type(lgr, SMC_LGR_SINGLE);
1335 } else if (!active_links) {
1336 smcr_lgr_set_type(lgr, SMC_LGR_NONE);
1337 smc_lgr_terminate_sched(lgr);
1340 mutex_unlock(&lgr->llc_conf_mutex);
1345 /* try to send a DELETE LINK ALL request on any active link,
1346 * waiting for send completion
1348 void smc_llc_send_link_delete_all(struct smc_link_group *lgr, bool ord, u32 rsn)
1350 struct smc_llc_msg_del_link delllc = {};
1353 delllc.hd.common.type = SMC_LLC_DELETE_LINK;
1354 delllc.hd.length = sizeof(delllc);
1356 delllc.hd.flags |= SMC_LLC_FLAG_DEL_LINK_ORDERLY;
1357 delllc.hd.flags |= SMC_LLC_FLAG_DEL_LINK_ALL;
1358 delllc.reason = htonl(rsn);
1360 for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
1361 if (!smc_link_sendable(&lgr->lnk[i]))
1363 if (!smc_llc_send_message_wait(&lgr->lnk[i], &delllc))
1368 static void smc_llc_process_srv_delete_link(struct smc_link_group *lgr)
1370 struct smc_llc_msg_del_link *del_llc;
1371 struct smc_link *lnk, *lnk_del;
1372 struct smc_llc_qentry *qentry;
1376 mutex_lock(&lgr->llc_conf_mutex);
1377 qentry = smc_llc_flow_qentry_clr(&lgr->llc_flow_lcl);
1379 del_llc = &qentry->msg.delete_link;
1381 if (qentry->msg.delete_link.hd.flags & SMC_LLC_FLAG_DEL_LINK_ALL) {
1382 /* delete entire lgr */
1383 smc_llc_send_link_delete_all(lgr, true, ntohl(
1384 qentry->msg.delete_link.reason));
1385 smc_lgr_terminate_sched(lgr);
1388 /* delete single link */
1390 for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
1391 if (lgr->lnk[i].link_id == del_llc->link_num) {
1392 lnk_del = &lgr->lnk[i];
1397 goto out; /* asymmetric link already deleted */
1399 if (smc_link_downing(&lnk_del->state)) {
1400 if (smc_switch_conns(lgr, lnk_del, false))
1401 smc_wr_tx_wait_no_pending_sends(lnk_del);
1403 if (!list_empty(&lgr->list)) {
1404 /* qentry is either a request from peer (send it back to
1405 * initiate the DELETE_LINK processing), or a locally
1406 * enqueued DELETE_LINK request (forward it)
1408 if (!smc_llc_send_message(lnk, &qentry->msg)) {
1409 struct smc_llc_qentry *qentry2;
1411 qentry2 = smc_llc_wait(lgr, lnk, SMC_LLC_WAIT_TIME,
1412 SMC_LLC_DELETE_LINK);
1414 smc_llc_flow_qentry_del(&lgr->llc_flow_lcl);
1417 smcr_link_clear(lnk_del, true);
1419 active_links = smc_llc_active_link_count(lgr);
1420 if (active_links == 1) {
1421 smcr_lgr_set_type(lgr, SMC_LGR_SINGLE);
1422 } else if (!active_links) {
1423 smcr_lgr_set_type(lgr, SMC_LGR_NONE);
1424 smc_lgr_terminate_sched(lgr);
1427 if (lgr->type == SMC_LGR_SINGLE && !list_empty(&lgr->list)) {
1428 /* trigger setup of asymm alt link */
1429 smc_llc_add_link_local(lnk);
1432 mutex_unlock(&lgr->llc_conf_mutex);
1436 static void smc_llc_delete_link_work(struct work_struct *work)
1438 struct smc_link_group *lgr = container_of(work, struct smc_link_group,
1441 if (list_empty(&lgr->list)) {
1442 /* link group is terminating */
1443 smc_llc_flow_qentry_del(&lgr->llc_flow_lcl);
1447 if (lgr->role == SMC_CLNT)
1448 smc_llc_process_cli_delete_link(lgr);
1450 smc_llc_process_srv_delete_link(lgr);
1452 smc_llc_flow_stop(lgr, &lgr->llc_flow_lcl);
1455 /* process a confirm_rkey request from peer, remote flow */
1456 static void smc_llc_rmt_conf_rkey(struct smc_link_group *lgr)
1458 struct smc_llc_msg_confirm_rkey *llc;
1459 struct smc_llc_qentry *qentry;
1460 struct smc_link *link;
1465 qentry = lgr->llc_flow_rmt.qentry;
1466 llc = &qentry->msg.confirm_rkey;
1467 link = qentry->link;
1469 num_entries = llc->rtoken[0].num_rkeys;
1470 /* first rkey entry is for receiving link */
1471 rk_idx = smc_rtoken_add(link,
1472 llc->rtoken[0].rmb_vaddr,
1473 llc->rtoken[0].rmb_key);
1477 for (i = 1; i <= min_t(u8, num_entries, SMC_LLC_RKEYS_PER_MSG - 1); i++)
1478 smc_rtoken_set2(lgr, rk_idx, llc->rtoken[i].link_id,
1479 llc->rtoken[i].rmb_vaddr,
1480 llc->rtoken[i].rmb_key);
1481 /* max links is 3 so there is no need to support conf_rkey_cont msgs */
1484 llc->hd.flags |= SMC_LLC_FLAG_RKEY_NEG;
1485 llc->hd.flags |= SMC_LLC_FLAG_RKEY_RETRY;
1487 llc->hd.flags |= SMC_LLC_FLAG_RESP;
1488 smc_llc_send_message(link, &qentry->msg);
1489 smc_llc_flow_qentry_del(&lgr->llc_flow_rmt);
1492 /* process a delete_rkey request from peer, remote flow */
1493 static void smc_llc_rmt_delete_rkey(struct smc_link_group *lgr)
1495 struct smc_llc_msg_delete_rkey *llc;
1496 struct smc_llc_qentry *qentry;
1497 struct smc_link *link;
1501 qentry = lgr->llc_flow_rmt.qentry;
1502 llc = &qentry->msg.delete_rkey;
1503 link = qentry->link;
1505 max = min_t(u8, llc->num_rkeys, SMC_LLC_DEL_RKEY_MAX);
1506 for (i = 0; i < max; i++) {
1507 if (smc_rtoken_delete(link, llc->rkey[i]))
1508 err_mask |= 1 << (SMC_LLC_DEL_RKEY_MAX - 1 - i);
1511 llc->hd.flags |= SMC_LLC_FLAG_RKEY_NEG;
1512 llc->err_mask = err_mask;
1514 llc->hd.flags |= SMC_LLC_FLAG_RESP;
1515 smc_llc_send_message(link, &qentry->msg);
1516 smc_llc_flow_qentry_del(&lgr->llc_flow_rmt);
1519 static void smc_llc_protocol_violation(struct smc_link_group *lgr, u8 type)
1521 pr_warn_ratelimited("smc: SMC-R lg %*phN LLC protocol violation: "
1522 "llc_type %d\n", SMC_LGR_ID_SIZE, &lgr->id, type);
1523 smc_llc_set_termination_rsn(lgr, SMC_LLC_DEL_PROT_VIOL);
1524 smc_lgr_terminate_sched(lgr);
1527 /* flush the llc event queue */
1528 static void smc_llc_event_flush(struct smc_link_group *lgr)
1530 struct smc_llc_qentry *qentry, *q;
1532 spin_lock_bh(&lgr->llc_event_q_lock);
1533 list_for_each_entry_safe(qentry, q, &lgr->llc_event_q, list) {
1534 list_del_init(&qentry->list);
1537 spin_unlock_bh(&lgr->llc_event_q_lock);
1540 static void smc_llc_event_handler(struct smc_llc_qentry *qentry)
1542 union smc_llc_msg *llc = &qentry->msg;
1543 struct smc_link *link = qentry->link;
1544 struct smc_link_group *lgr = link->lgr;
1546 if (!smc_link_usable(link))
1549 switch (llc->raw.hdr.common.type) {
1550 case SMC_LLC_TEST_LINK:
1551 llc->test_link.hd.flags |= SMC_LLC_FLAG_RESP;
1552 smc_llc_send_message(link, llc);
1554 case SMC_LLC_ADD_LINK:
1555 if (list_empty(&lgr->list))
1556 goto out; /* lgr is terminating */
1557 if (lgr->role == SMC_CLNT) {
1558 if (smc_llc_is_local_add_link(llc)) {
1559 if (lgr->llc_flow_lcl.type ==
1560 SMC_LLC_FLOW_ADD_LINK)
1561 break; /* add_link in progress */
1562 if (smc_llc_flow_start(&lgr->llc_flow_lcl,
1564 schedule_work(&lgr->llc_add_link_work);
1568 if (lgr->llc_flow_lcl.type == SMC_LLC_FLOW_ADD_LINK &&
1569 !lgr->llc_flow_lcl.qentry) {
1570 /* a flow is waiting for this message */
1571 smc_llc_flow_qentry_set(&lgr->llc_flow_lcl,
1573 wake_up(&lgr->llc_msg_waiter);
1574 } else if (smc_llc_flow_start(&lgr->llc_flow_lcl,
1576 schedule_work(&lgr->llc_add_link_work);
1578 } else if (smc_llc_flow_start(&lgr->llc_flow_lcl, qentry)) {
1579 /* as smc server, handle client suggestion */
1580 schedule_work(&lgr->llc_add_link_work);
1583 case SMC_LLC_CONFIRM_LINK:
1584 case SMC_LLC_ADD_LINK_CONT:
1585 if (lgr->llc_flow_lcl.type != SMC_LLC_FLOW_NONE) {
1586 /* a flow is waiting for this message */
1587 smc_llc_flow_qentry_set(&lgr->llc_flow_lcl, qentry);
1588 wake_up(&lgr->llc_msg_waiter);
1592 case SMC_LLC_DELETE_LINK:
1593 if (lgr->llc_flow_lcl.type == SMC_LLC_FLOW_ADD_LINK &&
1594 !lgr->llc_flow_lcl.qentry) {
1595 /* DEL LINK REQ during ADD LINK SEQ */
1596 smc_llc_flow_qentry_set(&lgr->llc_flow_lcl, qentry);
1597 wake_up(&lgr->llc_msg_waiter);
1598 } else if (smc_llc_flow_start(&lgr->llc_flow_lcl, qentry)) {
1599 schedule_work(&lgr->llc_del_link_work);
1602 case SMC_LLC_CONFIRM_RKEY:
1603 /* new request from remote, assign to remote flow */
1604 if (smc_llc_flow_start(&lgr->llc_flow_rmt, qentry)) {
1605 /* process here, does not wait for more llc msgs */
1606 smc_llc_rmt_conf_rkey(lgr);
1607 smc_llc_flow_stop(lgr, &lgr->llc_flow_rmt);
1610 case SMC_LLC_CONFIRM_RKEY_CONT:
1611 /* not used because max links is 3, and 3 rkeys fit into
1612 * one CONFIRM_RKEY message
1615 case SMC_LLC_DELETE_RKEY:
1616 /* new request from remote, assign to remote flow */
1617 if (smc_llc_flow_start(&lgr->llc_flow_rmt, qentry)) {
1618 /* process here, does not wait for more llc msgs */
1619 smc_llc_rmt_delete_rkey(lgr);
1620 smc_llc_flow_stop(lgr, &lgr->llc_flow_rmt);
1624 smc_llc_protocol_violation(lgr, llc->raw.hdr.common.type);
1631 /* worker to process llc messages on the event queue */
1632 static void smc_llc_event_work(struct work_struct *work)
1634 struct smc_link_group *lgr = container_of(work, struct smc_link_group,
1636 struct smc_llc_qentry *qentry;
1638 if (!lgr->llc_flow_lcl.type && lgr->delayed_event) {
1639 qentry = lgr->delayed_event;
1640 lgr->delayed_event = NULL;
1641 if (smc_link_usable(qentry->link))
1642 smc_llc_event_handler(qentry);
1648 spin_lock_bh(&lgr->llc_event_q_lock);
1649 if (!list_empty(&lgr->llc_event_q)) {
1650 qentry = list_first_entry(&lgr->llc_event_q,
1651 struct smc_llc_qentry, list);
1652 list_del_init(&qentry->list);
1653 spin_unlock_bh(&lgr->llc_event_q_lock);
1654 smc_llc_event_handler(qentry);
1657 spin_unlock_bh(&lgr->llc_event_q_lock);
1660 /* process llc responses in tasklet context */
1661 static void smc_llc_rx_response(struct smc_link *link,
1662 struct smc_llc_qentry *qentry)
1664 enum smc_llc_flowtype flowtype = link->lgr->llc_flow_lcl.type;
1665 struct smc_llc_flow *flow = &link->lgr->llc_flow_lcl;
1666 u8 llc_type = qentry->msg.raw.hdr.common.type;
1669 case SMC_LLC_TEST_LINK:
1670 if (smc_link_active(link))
1671 complete(&link->llc_testlink_resp);
1673 case SMC_LLC_ADD_LINK:
1674 case SMC_LLC_ADD_LINK_CONT:
1675 case SMC_LLC_CONFIRM_LINK:
1676 if (flowtype != SMC_LLC_FLOW_ADD_LINK || flow->qentry)
1677 break; /* drop out-of-flow response */
1679 case SMC_LLC_DELETE_LINK:
1680 if (flowtype != SMC_LLC_FLOW_DEL_LINK || flow->qentry)
1681 break; /* drop out-of-flow response */
1683 case SMC_LLC_CONFIRM_RKEY:
1684 case SMC_LLC_DELETE_RKEY:
1685 if (flowtype != SMC_LLC_FLOW_RKEY || flow->qentry)
1686 break; /* drop out-of-flow response */
1688 case SMC_LLC_CONFIRM_RKEY_CONT:
1689 /* not used because max links is 3 */
1692 smc_llc_protocol_violation(link->lgr, llc_type);
1698 /* assign responses to the local flow, we requested them */
1699 smc_llc_flow_qentry_set(&link->lgr->llc_flow_lcl, qentry);
1700 wake_up(&link->lgr->llc_msg_waiter);
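/* Queue an LLC message for processing: responses are handled right away in
 * the receive context, requests are appended to the event queue and handed
 * to the high-priority event worker.
 */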
1703 static void smc_llc_enqueue(struct smc_link *link, union smc_llc_msg *llc)
1705 struct smc_link_group *lgr = link->lgr;
1706 struct smc_llc_qentry *qentry;
1707 unsigned long flags;
1709 qentry = kmalloc(sizeof(*qentry), GFP_ATOMIC);
1712 qentry->link = link;
1713 INIT_LIST_HEAD(&qentry->list);
1714 memcpy(&qentry->msg, llc, sizeof(union smc_llc_msg));
1716 /* process responses immediately */
1717 if (llc->raw.hdr.flags & SMC_LLC_FLAG_RESP) {
1718 smc_llc_rx_response(link, qentry);
1722 /* add requests to event queue */
1723 spin_lock_irqsave(&lgr->llc_event_q_lock, flags);
1724 list_add_tail(&qentry->list, &lgr->llc_event_q);
1725 spin_unlock_irqrestore(&lgr->llc_event_q_lock, flags);
1726 queue_work(system_highpri_wq, &lgr->llc_event_work);
1729 /* copy received msg and add it to the event queue */
1730 static void smc_llc_rx_handler(struct ib_wc *wc, void *buf)
1732 struct smc_link *link = (struct smc_link *)wc->qp->qp_context;
1733 union smc_llc_msg *llc = buf;
1735 if (wc->byte_len < sizeof(*llc))
1736 return; /* short message */
1737 if (llc->raw.hdr.length != sizeof(*llc))
1738 return; /* invalid message */
1740 smc_llc_enqueue(link, llc);
1743 /***************************** worker, utils *********************************/
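/* The TEST_LINK worker implements a keepalive: when nothing was received
 * within llc_testlink_time (derived from tcp_keepalive_time), a TEST_LINK
 * request is sent, and a missing response takes the link down.
 */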
1745 static void smc_llc_testlink_work(struct work_struct *work)
1747 struct smc_link *link = container_of(to_delayed_work(work),
1748 struct smc_link, llc_testlink_wrk);
1749 unsigned long next_interval;
1750 unsigned long expire_time;
1751 u8 user_data[16] = { 0 };
1754 if (!smc_link_active(link))
1755 return; /* don't reschedule worker */
1756 expire_time = link->wr_rx_tstamp + link->llc_testlink_time;
1757 if (time_is_after_jiffies(expire_time)) {
1758 next_interval = expire_time - jiffies;
1761 reinit_completion(&link->llc_testlink_resp);
1762 smc_llc_send_test_link(link, user_data);
1763 /* receive TEST LINK response over RoCE fabric */
1764 rc = wait_for_completion_interruptible_timeout(&link->llc_testlink_resp,
1766 if (!smc_link_active(link))
1767 return; /* link state changed */
1769 smcr_link_down_cond_sched(link);
1772 next_interval = link->llc_testlink_time;
1774 schedule_delayed_work(&link->llc_testlink_wrk, next_interval);
1777 void smc_llc_lgr_init(struct smc_link_group *lgr, struct smc_sock *smc)
1779 struct net *net = sock_net(smc->clcsock->sk);
1781 INIT_WORK(&lgr->llc_event_work, smc_llc_event_work);
1782 INIT_WORK(&lgr->llc_add_link_work, smc_llc_add_link_work);
1783 INIT_WORK(&lgr->llc_del_link_work, smc_llc_delete_link_work);
1784 INIT_LIST_HEAD(&lgr->llc_event_q);
1785 spin_lock_init(&lgr->llc_event_q_lock);
1786 spin_lock_init(&lgr->llc_flow_lock);
1787 init_waitqueue_head(&lgr->llc_flow_waiter);
1788 init_waitqueue_head(&lgr->llc_msg_waiter);
1789 mutex_init(&lgr->llc_conf_mutex);
1790 lgr->llc_testlink_time = READ_ONCE(net->ipv4.sysctl_tcp_keepalive_time);
1793 /* called after lgr was removed from lgr_list */
1794 void smc_llc_lgr_clear(struct smc_link_group *lgr)
1796 smc_llc_event_flush(lgr);
1797 wake_up_all(&lgr->llc_flow_waiter);
1798 wake_up_all(&lgr->llc_msg_waiter);
1799 cancel_work_sync(&lgr->llc_event_work);
1800 cancel_work_sync(&lgr->llc_add_link_work);
1801 cancel_work_sync(&lgr->llc_del_link_work);
1802 if (lgr->delayed_event) {
1803 kfree(lgr->delayed_event);
1804 lgr->delayed_event = NULL;
1808 int smc_llc_link_init(struct smc_link *link)
1810 init_completion(&link->llc_testlink_resp);
1811 INIT_DELAYED_WORK(&link->llc_testlink_wrk, smc_llc_testlink_work);
1815 void smc_llc_link_active(struct smc_link *link)
1817 pr_warn_ratelimited("smc: SMC-R lg %*phN link added: id %*phN, "
1818 "peerid %*phN, ibdev %s, ibport %d\n",
1819 SMC_LGR_ID_SIZE, &link->lgr->id,
1820 SMC_LGR_ID_SIZE, &link->link_uid,
1821 SMC_LGR_ID_SIZE, &link->peer_link_uid,
1822 link->smcibdev->ibdev->name, link->ibport);
1823 link->state = SMC_LNK_ACTIVE;
1824 if (link->lgr->llc_testlink_time) {
1825 link->llc_testlink_time = link->lgr->llc_testlink_time;
1826 schedule_delayed_work(&link->llc_testlink_wrk,
1827 link->llc_testlink_time);
1831 /* called in worker context */
1832 void smc_llc_link_clear(struct smc_link *link, bool log)
1835 pr_warn_ratelimited("smc: SMC-R lg %*phN link removed: id %*phN"
1836 ", peerid %*phN, ibdev %s, ibport %d\n",
1837 SMC_LGR_ID_SIZE, &link->lgr->id,
1838 SMC_LGR_ID_SIZE, &link->link_uid,
1839 SMC_LGR_ID_SIZE, &link->peer_link_uid,
1840 link->smcibdev->ibdev->name, link->ibport);
1841 complete(&link->llc_testlink_resp);
1842 cancel_delayed_work_sync(&link->llc_testlink_wrk);
1845 /* register a new rtoken at the remote peer (for all links) */
1846 int smc_llc_do_confirm_rkey(struct smc_link *send_link,
1847 struct smc_buf_desc *rmb_desc)
1849 struct smc_link_group *lgr = send_link->lgr;
1850 struct smc_llc_qentry *qentry = NULL;
1853 rc = smc_llc_send_confirm_rkey(send_link, rmb_desc);
1856 /* receive CONFIRM RKEY response from server over RoCE fabric */
1857 qentry = smc_llc_wait(lgr, send_link, SMC_LLC_WAIT_TIME,
1858 SMC_LLC_CONFIRM_RKEY);
1859 if (!qentry || (qentry->msg.raw.hdr.flags & SMC_LLC_FLAG_RKEY_NEG))
1863 smc_llc_flow_qentry_del(&lgr->llc_flow_lcl);
1867 /* unregister an rtoken at the remote peer */
1868 int smc_llc_do_delete_rkey(struct smc_link_group *lgr,
1869 struct smc_buf_desc *rmb_desc)
1871 struct smc_llc_qentry *qentry = NULL;
1872 struct smc_link *send_link;
1875 send_link = smc_llc_usable_link(lgr);
1879 /* protected by llc_flow control */
1880 rc = smc_llc_send_delete_rkey(send_link, rmb_desc);
1883 /* receive DELETE RKEY response from server over RoCE fabric */
1884 qentry = smc_llc_wait(lgr, send_link, SMC_LLC_WAIT_TIME,
1885 SMC_LLC_DELETE_RKEY);
1886 if (!qentry || (qentry->msg.raw.hdr.flags & SMC_LLC_FLAG_RKEY_NEG))
1890 smc_llc_flow_qentry_del(&lgr->llc_flow_lcl);
1894 void smc_llc_link_set_uid(struct smc_link *link)
1898 link_uid = htonl(*((u32 *)link->lgr->id) + link->link_id);
1899 memcpy(link->link_uid, &link_uid, SMC_LGR_ID_SIZE);
1902 /* save peers link user id, used for debug purposes */
1903 void smc_llc_save_peer_uid(struct smc_llc_qentry *qentry)
1905 memcpy(qentry->link->peer_link_uid, qentry->msg.confirm_link.link_uid,
1909 /* evaluate confirm link request or response */
1910 int smc_llc_eval_conf_link(struct smc_llc_qentry *qentry,
1911 enum smc_llc_reqresp type)
1913 if (type == SMC_LLC_REQ) { /* SMC server assigns link_id */
1914 qentry->link->link_id = qentry->msg.confirm_link.link_num;
1915 smc_llc_link_set_uid(qentry->link);
1917 if (!(qentry->msg.raw.hdr.flags & SMC_LLC_FLAG_NO_RMBE_EYEC))
1922 /***************************** init, exit, misc ******************************/
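/* one receive handler is registered with the smc_wr layer per LLC message
 * type; all entries funnel into smc_llc_rx_handler()
 */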
1924 static struct smc_wr_rx_handler smc_llc_rx_handlers[] = {
1926 .handler = smc_llc_rx_handler,
1927 .type = SMC_LLC_CONFIRM_LINK
1930 .handler = smc_llc_rx_handler,
1931 .type = SMC_LLC_TEST_LINK
1934 .handler = smc_llc_rx_handler,
1935 .type = SMC_LLC_ADD_LINK
1938 .handler = smc_llc_rx_handler,
1939 .type = SMC_LLC_ADD_LINK_CONT
1942 .handler = smc_llc_rx_handler,
1943 .type = SMC_LLC_DELETE_LINK
1946 .handler = smc_llc_rx_handler,
1947 .type = SMC_LLC_CONFIRM_RKEY
1950 .handler = smc_llc_rx_handler,
1951 .type = SMC_LLC_CONFIRM_RKEY_CONT
1954 .handler = smc_llc_rx_handler,
1955 .type = SMC_LLC_DELETE_RKEY
1962 int __init smc_llc_init(void)
1964 struct smc_wr_rx_handler *handler;
1967 for (handler = smc_llc_rx_handlers; handler->handler; handler++) {
1968 INIT_HLIST_NODE(&handler->list);
1969 rc = smc_wr_rx_register_handler(handler);