1 // SPDX-License-Identifier: GPL-2.0
3 * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
4 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
8 * This file implements remote node state machines for:
10 * - Fabric controller events.
11 * - Name/directory services interaction.
12 * - Point-to-point logins.
16 * fabric_sm Node State Machine: Fabric States
17 * ns_sm Node State Machine: Name/Directory Services States
18 * p2p_sm Node State Machine: Point-to-Point Node States
/*
 * efc_fabric_initiate_shutdown() - begin tearing down a fabric node: stop
 * further ELS I/O, request HW node detach, then run the common cleanup path.
 * NOTE(review): partial extract; lines are missing between fragments.
 */
24 efc_fabric_initiate_shutdown(struct efc_node *node)
26 struct efc *efc = node->efc;
28 node->els_io_enabled = false;
33 /* issue hw node free; don't care if succeeds right away
34 * or sometime later, will check node->attached later in
37 rc = efc_cmd_node_detach(efc, &node->rnode);
39 node_printf(node, "Failed freeing HW node, rc=%d\n",
44 * node has either been detached or is in the process of being detached,
45 * call common node's initiate cleanup function
47 efc_node_initiate_cleanup(node);
/*
 * __efc_fabric_common() - event handler shared by all fabric states;
 * handles SHUTDOWN by initiating node teardown, everything else falls
 * through to the generic node handler.
 */
51 __efc_fabric_common(const char *funcname, struct efc_sm_ctx *ctx,
52 enum efc_sm_event evt, void *arg)
54 struct efc_node *node = NULL;
59 case EFC_EVT_DOMAIN_ATTACH_OK:
61 case EFC_EVT_SHUTDOWN:
62 node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT;
63 efc_fabric_initiate_shutdown(node);
67 /* call default event handler common to all nodes */
68 __efc_node_common(funcname, ctx, evt, arg);
/*
 * __efc_fabric_init() - initial fabric node state; on entry/re-enter the
 * node transitions to waiting for the FLOGI response.
 */
73 __efc_fabric_init(struct efc_sm_ctx *ctx, enum efc_sm_event evt,
76 struct efc_node *node = ctx->app;
77 struct efc *efc = node->efc;
79 efc_node_evt_set(ctx, evt, __func__);
85 efc_log_debug(efc, ">>> reenter !!\n");
91 efc_node_transition(node, __efc_fabric_flogi_wait_rsp, NULL);
95 __efc_fabric_common(__func__, ctx, evt, arg);
/* efc_fabric_set_topology() - record the discovered topology on the nport. */
100 efc_fabric_set_topology(struct efc_node *node,
101 enum efc_nport_topology topology)
103 node->nport->topology = topology;
/*
 * efc_fabric_notify_topology() - post EFC_EVT_NPORT_TOPOLOGY_NOTIFY to every
 * other node on this nport so they learn the topology just discovered.
 */
107 efc_fabric_notify_topology(struct efc_node *node)
109 struct efc_node *tmp_node;
110 enum efc_nport_topology topology = node->nport->topology;
114 * now loop through the nodes in the nport
115 * and send topology notification
117 xa_for_each(&node->nport->lookup, index, tmp_node) {
118 if (tmp_node != node) {
119 efc_node_post_event(tmp_node,
120 EFC_EVT_NPORT_TOPOLOGY_NOTIFY,
/*
 * efc_rnode_is_nport() - true if the FLOGI responder is an N_Port (the
 * F_PORT bit in the common service parameters is clear).
 */
126 static bool efc_rnode_is_nport(struct fc_els_flogi *rsp)
128 return !(ntohs(rsp->fl_csp.sp_features) & FC_SP_FT_FPORT);
/*
 * __efc_fabric_flogi_wait_rsp() - wait for the FLOGI response. On success,
 * save the service parameters and either attach the domain (fabric topology)
 * or run point-to-point setup; on rejection/failure, shut the nport down.
 */
132 __efc_fabric_flogi_wait_rsp(struct efc_sm_ctx *ctx,
133 enum efc_sm_event evt, void *arg)
135 struct efc_node_cb *cbdata = arg;
136 struct efc_node *node = ctx->app;
138 efc_node_evt_set(ctx, evt, __func__);
143 case EFC_EVT_SRRS_ELS_REQ_OK: {
144 if (efc_node_check_els_req(ctx, evt, arg, ELS_FLOGI,
145 __efc_fabric_common, __func__)) {
148 WARN_ON(!node->els_req_cnt);
151 memcpy(node->nport->domain->flogi_service_params,
152 cbdata->els_rsp.virt,
153 sizeof(struct fc_els_flogi));
155 /* Check to see if the fabric is an F_PORT or and N_PORT */
156 if (!efc_rnode_is_nport(cbdata->els_rsp.virt)) {
157 /* sm: if not nport / efc_domain_attach */
158 /* ext_status has the fc_id, attach domain */
159 efc_fabric_set_topology(node, EFC_NPORT_TOPO_FABRIC);
160 efc_fabric_notify_topology(node);
161 WARN_ON(node->nport->domain->attached);
162 efc_domain_attach(node->nport->domain,
164 efc_node_transition(node,
165 __efc_fabric_wait_domain_attach,
170 /* sm: if nport and p2p_winner / efc_domain_attach */
171 efc_fabric_set_topology(node, EFC_NPORT_TOPO_P2P);
172 if (efc_p2p_setup(node->nport)) {
174 "p2p setup failed, shutting down node\n");
175 node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT;
176 efc_fabric_initiate_shutdown(node);
180 if (node->nport->p2p_winner) {
181 efc_node_transition(node,
182 __efc_p2p_wait_domain_attach,
184 if (node->nport->domain->attached &&
185 !node->nport->domain->domain_notify_pend) {
188 * just send ATTACH_OK
191 "p2p winner, domain already attached\n");
192 efc_node_post_event(node,
193 EFC_EVT_DOMAIN_ATTACH_OK,
198 * peer is p2p winner;
199 * PLOGI will be received on the
201 * this node has served its purpose
203 node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT;
204 efc_fabric_initiate_shutdown(node);
210 case EFC_EVT_ELS_REQ_ABORTED:
211 case EFC_EVT_SRRS_ELS_REQ_RJT:
212 case EFC_EVT_SRRS_ELS_REQ_FAIL: {
213 struct efc_nport *nport = node->nport;
215 * with these errors, we have no recovery,
216 * so shutdown the nport, leave the link
217 * up and the domain ready
219 if (efc_node_check_els_req(ctx, evt, arg, ELS_FLOGI,
220 __efc_fabric_common, __func__)) {
224 "FLOGI failed evt=%s, shutting down nport [%s]\n",
225 efc_sm_event_name(evt), nport->display_name);
226 WARN_ON(!node->els_req_cnt);
228 efc_sm_post_event(&nport->sm, EFC_EVT_SHUTDOWN, NULL);
233 __efc_fabric_common(__func__, ctx, evt, arg);
/*
 * __efc_vport_fabric_init() - initial state for a vport (NPIV) fabric node;
 * sends FDISC instead of FLOGI and waits for its response.
 */
238 __efc_vport_fabric_init(struct efc_sm_ctx *ctx,
239 enum efc_sm_event evt, void *arg)
241 struct efc_node *node = ctx->app;
243 efc_node_evt_set(ctx, evt, __func__);
249 /* sm: / send FDISC */
250 efc_send_fdisc(node);
251 efc_node_transition(node, __efc_fabric_fdisc_wait_rsp, NULL);
255 __efc_fabric_common(__func__, ctx, evt, arg);
/*
 * __efc_fabric_fdisc_wait_rsp() - wait for the FDISC response; on success
 * attach the nport with the assigned fc_id, on reject/failure shut the
 * nport down.
 */
260 __efc_fabric_fdisc_wait_rsp(struct efc_sm_ctx *ctx,
261 enum efc_sm_event evt, void *arg)
263 struct efc_node_cb *cbdata = arg;
264 struct efc_node *node = ctx->app;
266 efc_node_evt_set(ctx, evt, __func__);
271 case EFC_EVT_SRRS_ELS_REQ_OK: {
272 /* fc_id is in ext_status */
273 if (efc_node_check_els_req(ctx, evt, arg, ELS_FDISC,
274 __efc_fabric_common, __func__)) {
278 WARN_ON(!node->els_req_cnt);
280 /* sm: / efc_nport_attach */
281 efc_nport_attach(node->nport, cbdata->ext_status);
282 efc_node_transition(node, __efc_fabric_wait_domain_attach,
287 case EFC_EVT_SRRS_ELS_REQ_RJT:
288 case EFC_EVT_SRRS_ELS_REQ_FAIL: {
289 if (efc_node_check_els_req(ctx, evt, arg, ELS_FDISC,
290 __efc_fabric_common, __func__)) {
293 WARN_ON(!node->els_req_cnt);
295 efc_log_err(node->efc, "FDISC failed, shutting down nport\n");
296 /* sm: / shutdown nport */
297 efc_sm_post_event(&node->nport->sm, EFC_EVT_SHUTDOWN, NULL);
302 __efc_fabric_common(__func__, ctx, evt, arg);
/*
 * efc_start_ns_node() - find or allocate the name-services node
 * (FC_FID_DIR_SERV) for this nport and start its state machine, optionally
 * pausing it when the nodedb debug mask requests it.
 */
307 efc_start_ns_node(struct efc_nport *nport)
311 /* Instantiate a name services node */
312 ns = efc_node_find(nport, FC_FID_DIR_SERV);
314 ns = efc_node_alloc(nport, FC_FID_DIR_SERV, false, false);
319 * for found ns, should we be transitioning from here?
320 * breaks transition only
321 * 1. from within state machine or
324 if (ns->efc->nodedb_mask & EFC_NODEDB_PAUSE_NAMESERVER)
325 efc_node_pause(ns, __efc_ns_init);
327 efc_node_transition(ns, __efc_ns_init, NULL);
/*
 * efc_start_fabctl_node() - find or allocate the fabric-controller node
 * (FC_FID_FCTRL) and start its state machine.
 */
332 efc_start_fabctl_node(struct efc_nport *nport)
334 struct efc_node *fabctl;
336 fabctl = efc_node_find(nport, FC_FID_FCTRL);
338 fabctl = efc_node_alloc(nport, FC_FID_FCTRL,
344 * for found ns, should we be transitioning from here?
345 * breaks transition only
346 * 1. from within state machine or
349 efc_node_transition(fabctl, __efc_fabctl_init, NULL);
/*
 * __efc_fabric_wait_domain_attach() - hold frames until the domain/nport
 * attach completes, then start the name-services node and (if RSCN is
 * enabled) the fabric-controller node, and go idle.
 */
354 __efc_fabric_wait_domain_attach(struct efc_sm_ctx *ctx,
355 enum efc_sm_event evt, void *arg)
357 struct efc_node *node = ctx->app;
359 efc_node_evt_set(ctx, evt, __func__);
365 efc_node_hold_frames(node);
369 efc_node_accept_frames(node);
371 case EFC_EVT_DOMAIN_ATTACH_OK:
372 case EFC_EVT_NPORT_ATTACH_OK: {
375 rc = efc_start_ns_node(node->nport);
379 /* sm: if enable_ini / start fabctl node */
380 /* Instantiate the fabric controller (sends SCR) */
381 if (node->nport->enable_rscn) {
382 rc = efc_start_fabctl_node(node->nport);
386 efc_node_transition(node, __efc_fabric_idle, NULL);
390 __efc_fabric_common(__func__, ctx, evt, arg);
/*
 * __efc_fabric_idle() - terminal fabric state; ignores redundant
 * DOMAIN_ATTACH_OK and defers everything else to the common handler.
 */
395 __efc_fabric_idle(struct efc_sm_ctx *ctx, enum efc_sm_event evt,
398 struct efc_node *node = ctx->app;
400 efc_node_evt_set(ctx, evt, __func__);
405 case EFC_EVT_DOMAIN_ATTACH_OK:
408 __efc_fabric_common(__func__, ctx, evt, arg);
/*
 * __efc_ns_init() - initial name-services node state; sends PLOGI to the
 * directory server and waits for the response.
 */
413 __efc_ns_init(struct efc_sm_ctx *ctx, enum efc_sm_event evt, void *arg)
415 struct efc_node *node = ctx->app;
417 efc_node_evt_set(ctx, evt, __func__);
423 /* sm: / send PLOGI */
424 efc_send_plogi(node);
425 efc_node_transition(node, __efc_ns_plogi_wait_rsp, NULL);
428 __efc_fabric_common(__func__, ctx, evt, arg);
/*
 * __efc_ns_plogi_wait_rsp() - wait for the directory-server PLOGI response;
 * on success save service params and attach the node, posting
 * NODE_ATTACH_FAIL if the attach call fails.
 */
433 __efc_ns_plogi_wait_rsp(struct efc_sm_ctx *ctx,
434 enum efc_sm_event evt, void *arg)
436 struct efc_node_cb *cbdata = arg;
437 struct efc_node *node = ctx->app;
439 efc_node_evt_set(ctx, evt, __func__);
444 case EFC_EVT_SRRS_ELS_REQ_OK: {
447 /* Save service parameters */
448 if (efc_node_check_els_req(ctx, evt, arg, ELS_PLOGI,
449 __efc_fabric_common, __func__)) {
452 WARN_ON(!node->els_req_cnt);
454 /* sm: / save sparams, efc_node_attach */
455 efc_node_save_sparms(node, cbdata->els_rsp.virt);
456 rc = efc_node_attach(node);
457 efc_node_transition(node, __efc_ns_wait_node_attach, NULL);
459 efc_node_post_event(node, EFC_EVT_NODE_ATTACH_FAIL,
464 __efc_fabric_common(__func__, ctx, evt, arg);
/*
 * __efc_ns_wait_node_attach() - hold frames while the NS node attach is in
 * flight; on success send RFT_ID, on failure or SHUTDOWN tear the node down.
 */
469 __efc_ns_wait_node_attach(struct efc_sm_ctx *ctx,
470 enum efc_sm_event evt, void *arg)
472 struct efc_node *node = ctx->app;
474 efc_node_evt_set(ctx, evt, __func__);
480 efc_node_hold_frames(node);
484 efc_node_accept_frames(node);
487 case EFC_EVT_NODE_ATTACH_OK:
488 node->attached = true;
489 /* sm: / send RFTID */
490 efc_ns_send_rftid(node);
491 efc_node_transition(node, __efc_ns_rftid_wait_rsp, NULL);
494 case EFC_EVT_NODE_ATTACH_FAIL:
495 /* node attach failed, shutdown the node */
496 node->attached = false;
497 node_printf(node, "Node attach failed\n");
498 node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT;
499 efc_fabric_initiate_shutdown(node);
502 case EFC_EVT_SHUTDOWN:
503 node_printf(node, "Shutdown event received\n");
504 node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT;
505 efc_node_transition(node,
506 __efc_fabric_wait_attach_evt_shutdown,
511 * if receive RSCN just ignore,
512 * we haven't sent GID_PT yet (ACC sent by fabctl node)
514 case EFC_EVT_RSCN_RCVD:
518 __efc_fabric_common(__func__, ctx, evt, arg);
/*
 * __efc_fabric_wait_attach_evt_shutdown() - a shutdown was requested while
 * an attach was pending; wait for the attach to resolve either way, then
 * proceed with node shutdown. Further SHUTDOWN events are ignored.
 */
523 __efc_fabric_wait_attach_evt_shutdown(struct efc_sm_ctx *ctx,
524 enum efc_sm_event evt, void *arg)
526 struct efc_node *node = ctx->app;
528 efc_node_evt_set(ctx, evt, __func__);
534 efc_node_hold_frames(node);
538 efc_node_accept_frames(node);
541 /* wait for any of these attach events and then shutdown */
542 case EFC_EVT_NODE_ATTACH_OK:
543 node->attached = true;
544 node_printf(node, "Attach evt=%s, proceed to shutdown\n",
545 efc_sm_event_name(evt));
546 efc_fabric_initiate_shutdown(node);
549 case EFC_EVT_NODE_ATTACH_FAIL:
550 node->attached = false;
551 node_printf(node, "Attach evt=%s, proceed to shutdown\n",
552 efc_sm_event_name(evt));
553 efc_fabric_initiate_shutdown(node);
556 /* ignore shutdown event as we're already in shutdown path */
557 case EFC_EVT_SHUTDOWN:
558 node_printf(node, "Shutdown event received\n");
562 __efc_fabric_common(__func__, ctx, evt, arg);
/*
 * __efc_ns_rftid_wait_rsp() - wait for the RFT_ID response from the name
 * server, then send RFF_ID. RSCNs are ignored here (GID_PT not sent yet).
 */
567 __efc_ns_rftid_wait_rsp(struct efc_sm_ctx *ctx,
568 enum efc_sm_event evt, void *arg)
570 struct efc_node *node = ctx->app;
572 efc_node_evt_set(ctx, evt, __func__);
577 case EFC_EVT_SRRS_ELS_REQ_OK:
578 if (efc_node_check_ns_req(ctx, evt, arg, FC_NS_RFT_ID,
579 __efc_fabric_common, __func__)) {
582 WARN_ON(!node->els_req_cnt);
584 /* sm: / send RFFID */
585 efc_ns_send_rffid(node);
586 efc_node_transition(node, __efc_ns_rffid_wait_rsp, NULL);
590 * if receive RSCN just ignore,
591 * we haven't sent GID_PT yet (ACC sent by fabctl node)
593 case EFC_EVT_RSCN_RCVD:
597 __efc_fabric_common(__func__, ctx, evt, arg);
/*
 * __efc_ns_rffid_wait_rsp() - wait for the RFF_ID response; if RSCN is
 * enabled issue GID_PT for discovery, otherwise (target-only) go idle.
 */
602 __efc_ns_rffid_wait_rsp(struct efc_sm_ctx *ctx,
603 enum efc_sm_event evt, void *arg)
605 struct efc_node *node = ctx->app;
607 efc_node_evt_set(ctx, evt, __func__);
612 * Waits for an RFFID response event;
613 * if rscn enabled, a GIDPT name services request is issued.
616 case EFC_EVT_SRRS_ELS_REQ_OK: {
617 if (efc_node_check_ns_req(ctx, evt, arg, FC_NS_RFF_ID,
618 __efc_fabric_common, __func__)) {
621 WARN_ON(!node->els_req_cnt);
623 if (node->nport->enable_rscn) {
624 /* sm: if enable_rscn / send GIDPT */
625 efc_ns_send_gidpt(node);
627 efc_node_transition(node, __efc_ns_gidpt_wait_rsp,
630 /* if 'T' only, we're done, go to idle */
631 efc_node_transition(node, __efc_ns_idle, NULL);
636 * if receive RSCN just ignore,
637 * we haven't sent GID_PT yet (ACC sent by fabctl node)
639 case EFC_EVT_RSCN_RCVD:
643 __efc_fabric_common(__func__, ctx, evt, arg);
/*
 * efc_process_gidpt_payload() - reconcile the GID_PT port-ID list with the
 * nodes currently known on the nport: post NODE_MISSING for nodes absent
 * from the response, NODE_REFOUND for still-present ones, and allocate new
 * device nodes for newly reported port IDs.
 * NOTE(review): GFP_ATOMIC allocation below implies this may run in atomic
 * context — confirm against the state-machine locking.
 */
648 efc_process_gidpt_payload(struct efc_node *node,
649 void *data, u32 gidpt_len)
652 struct efc_node *newnode;
653 struct efc_nport *nport = node->nport;
654 struct efc *efc = node->efc;
655 u32 port_id = 0, port_count, plist_count;
657 struct efc_node **active_nodes;
660 struct fc_ct_hdr hdr;
661 struct fc_gid_pn_resp pn_rsp;
663 struct fc_gid_pn_resp *gidpt;
667 gidpt = &rsp->pn_rsp;
668 residual = be16_to_cpu(rsp->hdr.ct_mr_size);
671 efc_log_debug(node->efc, "residual is %u words\n", residual);
673 if (be16_to_cpu(rsp->hdr.ct_cmd) == FC_FS_RJT) {
675 "GIDPT request failed: rsn x%x rsn_expl x%x\n",
676 rsp->hdr.ct_reason, rsp->hdr.ct_explan);
680 plist_count = (gidpt_len - sizeof(struct fc_ct_hdr)) / sizeof(*gidpt);
682 /* Count the number of nodes */
684 xa_for_each(&nport->lookup, index, n) {
688 /* Allocate a buffer for all nodes */
689 active_nodes = kzalloc(port_count * sizeof(*active_nodes), GFP_ATOMIC);
691 node_printf(node, "efc_malloc failed\n");
695 /* Fill buffer with fc_id of active nodes */
697 xa_for_each(&nport->lookup, index, n) {
698 port_id = n->rnode.fc_id;
702 case FC_FID_DIR_SERV:
705 if (port_id != FC_FID_DOM_MGR)
706 active_nodes[i++] = n;
711 /* update the active nodes buffer */
712 for (i = 0; i < plist_count; i++) {
713 hton24(gidpt[i].fp_fid, port_id);
715 for (j = 0; j < port_count; j++) {
716 if (active_nodes[j] &&
717 port_id == active_nodes[j]->rnode.fc_id) {
718 active_nodes[j] = NULL;
722 if (gidpt[i].fp_resvd & FC_NS_FID_LAST)
726 /* Those remaining in the active_nodes[] are now gone ! */
727 for (i = 0; i < port_count; i++) {
729 * if we're an initiator and the remote node
730 * is a target, then post the node missing event.
731 * if we're target and we have enabled
732 * target RSCN, then post the node missing event.
734 if (!active_nodes[i])
737 if ((node->nport->enable_ini && active_nodes[i]->targ) ||
738 (node->nport->enable_tgt && enable_target_rscn(efc))) {
739 efc_node_post_event(active_nodes[i],
740 EFC_EVT_NODE_MISSING, NULL);
743 "GID_PT: skipping non-tgt port_id x%06x\n",
744 active_nodes[i]->rnode.fc_id);
749 for (i = 0; i < plist_count; i++) {
750 hton24(gidpt[i].fp_fid, port_id);
752 /* Don't create node for ourselves */
753 if (port_id == node->rnode.nport->fc_id) {
754 if (gidpt[i].fp_resvd & FC_NS_FID_LAST)
759 newnode = efc_node_find(nport, port_id);
761 if (!node->nport->enable_ini)
764 newnode = efc_node_alloc(nport, port_id, false, false);
766 efc_log_err(efc, "efc_node_alloc() failed\n");
770 * send PLOGI automatically
773 efc_node_init_device(newnode, true);
776 if (node->nport->enable_ini && newnode->targ) {
777 efc_node_post_event(newnode, EFC_EVT_NODE_REFOUND,
781 if (gidpt[i].fp_resvd & FC_NS_FID_LAST)
/*
 * __efc_ns_gidpt_wait_rsp() - wait for the GID_PT response and process the
 * reported FC_IDs. A failure is tolerated (retry on next RSCN); an RSCN
 * arriving mid-query sets rscn_pending so discovery restarts from idle.
 */
788 __efc_ns_gidpt_wait_rsp(struct efc_sm_ctx *ctx,
789 enum efc_sm_event evt, void *arg)
791 struct efc_node_cb *cbdata = arg;
792 struct efc_node *node = ctx->app;
794 efc_node_evt_set(ctx, evt, __func__);
798 * Wait for a GIDPT response from the name server. Process the FC_IDs
799 * that are reported by creating new remote ports, as needed.
803 case EFC_EVT_SRRS_ELS_REQ_OK: {
804 if (efc_node_check_ns_req(ctx, evt, arg, FC_NS_GID_PT,
805 __efc_fabric_common, __func__)) {
808 WARN_ON(!node->els_req_cnt);
810 /* sm: / process GIDPT payload */
811 efc_process_gidpt_payload(node, cbdata->els_rsp.virt,
812 cbdata->els_rsp.len);
813 efc_node_transition(node, __efc_ns_idle, NULL);
817 case EFC_EVT_SRRS_ELS_REQ_FAIL: {
818 /* not much we can do; will retry with the next RSCN */
819 node_printf(node, "GID_PT failed to complete\n");
820 WARN_ON(!node->els_req_cnt);
822 efc_node_transition(node, __efc_ns_idle, NULL);
826 /* if receive RSCN here, queue up another discovery processing */
827 case EFC_EVT_RSCN_RCVD: {
828 node_printf(node, "RSCN received during GID_PT processing\n");
829 node->rscn_pending = true;
834 __efc_fabric_common(__func__, ctx, evt, arg);
/*
 * __efc_ns_idle() - NS idle state; on entry restarts discovery if an RSCN
 * arrived during GID_PT processing, and on new RSCNs either delays GID_PT
 * (target-only with tgt_rscn_delay configured) or sends it immediately.
 */
839 __efc_ns_idle(struct efc_sm_ctx *ctx, enum efc_sm_event evt, void *arg)
841 struct efc_node *node = ctx->app;
842 struct efc *efc = node->efc;
844 efc_node_evt_set(ctx, evt, __func__);
849 * Wait for RSCN received events (posted from the fabric controller)
850 * and restart the GIDPT name services query and processing.
855 if (!node->rscn_pending)
858 node_printf(node, "RSCN pending, restart discovery\n");
859 node->rscn_pending = false;
862 case EFC_EVT_RSCN_RCVD: {
863 /* sm: / send GIDPT */
865 * If target RSCN processing is enabled,
866 * and this is target only (not initiator),
867 * and tgt_rscn_delay is non-zero,
868 * then we delay issuing the GID_PT
870 if (efc->tgt_rscn_delay_msec != 0 &&
871 !node->nport->enable_ini && node->nport->enable_tgt &&
872 enable_target_rscn(efc)) {
873 efc_node_transition(node, __efc_ns_gidpt_delay, NULL);
875 efc_ns_send_gidpt(node);
876 efc_node_transition(node, __efc_ns_gidpt_wait_rsp,
883 __efc_fabric_common(__func__, ctx, evt, arg);
/*
 * gidpt_delay_timer_cb() - GID_PT delay timer expiry; stops the timer and
 * posts EFC_EVT_GIDPT_DELAY_EXPIRED to the owning node.
 */
888 gidpt_delay_timer_cb(struct timer_list *t)
890 struct efc_node *node = from_timer(node, t, gidpt_delay_timer);
892 del_timer(&node->gidpt_delay_timer);
894 efc_node_post_event(node, EFC_EVT_GIDPT_DELAY_EXPIRED, NULL);
/*
 * __efc_ns_gidpt_delay() - delay state before issuing GID_PT: on entry arm
 * a timer for tgt_rscn_delay (or tgt_rscn_period if the last GID_PT was too
 * recent); when it expires, record the timestamp and send GID_PT. RSCNs
 * received while delaying are deliberately ignored.
 */
898 __efc_ns_gidpt_delay(struct efc_sm_ctx *ctx,
899 enum efc_sm_event evt, void *arg)
901 struct efc_node *node = ctx->app;
902 struct efc *efc = node->efc;
904 efc_node_evt_set(ctx, evt, __func__);
909 case EFC_EVT_ENTER: {
913 * Compute the delay time.
914 * Set to tgt_rscn_delay, if the time since last GIDPT
915 * is less than tgt_rscn_period, then use tgt_rscn_period.
917 delay_msec = efc->tgt_rscn_delay_msec;
918 tmp = jiffies_to_msecs(jiffies) - node->time_last_gidpt_msec;
919 if (tmp < efc->tgt_rscn_period_msec)
920 delay_msec = efc->tgt_rscn_period_msec;
922 timer_setup(&node->gidpt_delay_timer, &gidpt_delay_timer_cb,
924 mod_timer(&node->gidpt_delay_timer,
925 jiffies + msecs_to_jiffies(delay_msec));
930 case EFC_EVT_GIDPT_DELAY_EXPIRED:
931 node->time_last_gidpt_msec = jiffies_to_msecs(jiffies);
933 efc_ns_send_gidpt(node);
934 efc_node_transition(node, __efc_ns_gidpt_wait_rsp, NULL);
937 case EFC_EVT_RSCN_RCVD: {
939 "RSCN received while in GIDPT delay - no action\n");
944 __efc_fabric_common(__func__, ctx, evt, arg);
/*
 * __efc_fabctl_init() - initial fabric-controller node state; no login is
 * needed, so it goes straight to waiting for the SCR response.
 */
949 __efc_fabctl_init(struct efc_sm_ctx *ctx,
950 enum efc_sm_event evt, void *arg)
952 struct efc_node *node = ctx->app;
958 /* no need to login to fabric controller, just send SCR */
960 efc_node_transition(node, __efc_fabctl_wait_scr_rsp, NULL);
963 case EFC_EVT_NODE_ATTACH_OK:
964 node->attached = true;
968 __efc_fabric_common(__func__, ctx, evt, arg);
/*
 * __efc_fabctl_wait_scr_rsp() - wait for the SCR (state change registration)
 * response from the fabric controller, then move to the ready state.
 */
973 __efc_fabctl_wait_scr_rsp(struct efc_sm_ctx *ctx,
974 enum efc_sm_event evt, void *arg)
976 struct efc_node *node = ctx->app;
978 efc_node_evt_set(ctx, evt, __func__);
983 * Fabric controller node state machine:
984 * Wait for an SCR response from the fabric controller.
987 case EFC_EVT_SRRS_ELS_REQ_OK:
988 if (efc_node_check_els_req(ctx, evt, arg, ELS_SCR,
989 __efc_fabric_common, __func__)) {
992 WARN_ON(!node->els_req_cnt);
994 efc_node_transition(node, __efc_fabctl_ready, NULL);
998 __efc_fabric_common(__func__, ctx, evt, arg);
/*
 * efc_process_rscn() - forward a received RSCN to the name-services node so
 * it can restart discovery; warns if the NS node cannot be found.
 */
1003 efc_process_rscn(struct efc_node *node, struct efc_node_cb *cbdata)
1005 struct efc *efc = node->efc;
1006 struct efc_nport *nport = node->nport;
1007 struct efc_node *ns;
1009 /* Forward this event to the name-services node */
1010 ns = efc_node_find(nport, FC_FID_DIR_SERV);
1012 efc_node_post_event(ns, EFC_EVT_RSCN_RCVD, cbdata);
1014 efc_log_warn(efc, "can't find name server node\n");
/*
 * __efc_fabctl_ready() - steady state for the fabric-controller node: on
 * RSCN, forward it to the NS node, send LS_ACC, and wait for the LS_ACC
 * completion.
 */
1018 __efc_fabctl_ready(struct efc_sm_ctx *ctx,
1019 enum efc_sm_event evt, void *arg)
1021 struct efc_node_cb *cbdata = arg;
1022 struct efc_node *node = ctx->app;
1024 efc_node_evt_set(ctx, evt, __func__);
1029 * Fabric controller node state machine: Ready.
1030 * In this state, the fabric controller sends a RSCN, which is received
1031 * by this node and is forwarded to the name services node object; and
1032 * the RSCN LS_ACC is sent.
1035 case EFC_EVT_RSCN_RCVD: {
1036 struct fc_frame_header *hdr = cbdata->header->dma.virt;
1039 * sm: / process RSCN (forward to name services node),
1042 efc_process_rscn(node, cbdata);
1043 efc_send_ls_acc(node, be16_to_cpu(hdr->fh_ox_id));
1044 efc_node_transition(node, __efc_fabctl_wait_ls_acc_cmpl,
1050 __efc_fabric_common(__func__, ctx, evt, arg);
/*
 * __efc_fabctl_wait_ls_acc_cmpl() - hold frames until the RSCN LS_ACC
 * completes, then return to the ready state.
 */
1055 __efc_fabctl_wait_ls_acc_cmpl(struct efc_sm_ctx *ctx,
1056 enum efc_sm_event evt, void *arg)
1058 struct efc_node *node = ctx->app;
1060 efc_node_evt_set(ctx, evt, __func__);
1066 efc_node_hold_frames(node);
1070 efc_node_accept_frames(node);
1073 case EFC_EVT_SRRS_ELS_CMPL_OK:
1074 WARN_ON(!node->els_cmpl_cnt);
1075 node->els_cmpl_cnt--;
1076 efc_node_transition(node, __efc_fabctl_ready, NULL);
1080 __efc_fabric_common(__func__, ctx, evt, arg);
/*
 * efc_get_wwpn() - extract a WWN from FLOGI service parameters.
 * NOTE(review): despite the "wwpn" name this reads fl_wwnn (node name),
 * while the caller logs fl_wwpn separately — verify against the FLOGI
 * payload layout before changing.
 */
1085 efc_get_wwpn(struct fc_els_flogi *sp)
1087 return be64_to_cpu(sp->fl_wwnn);
/*
 * efc_rnode_is_winner() - compare remote and local WWPNs to decide the
 * point-to-point "winner"; returns nonzero when the remote WWPN is higher.
 * Equal WWPNs are logged as a warning (only legal in external loopback).
 */
1091 efc_rnode_is_winner(struct efc_nport *nport)
1093 struct fc_els_flogi *remote_sp;
1095 u64 local_wwpn = nport->wwpn;
1098 remote_sp = (struct fc_els_flogi *)nport->domain->flogi_service_params;
1099 remote_wwpn = efc_get_wwpn(remote_sp);
1101 local_wwpn ^= wwn_bump;
1103 efc_log_debug(nport->efc, "r: %llx\n",
1104 be64_to_cpu(remote_sp->fl_wwpn));
1105 efc_log_debug(nport->efc, "l: %llx\n", local_wwpn);
1107 if (remote_wwpn == local_wwpn) {
1108 efc_log_warn(nport->efc,
1109 "WWPN of remote node [%08x %08x] matches local WWPN\n",
1110 (u32)(local_wwpn >> 32ll),
1115 return (remote_wwpn > local_wwpn);
/*
 * __efc_p2p_wait_domain_attach() - p2p winner path: once the domain is
 * attached, spin up (or reuse) the remote node that will send PLOGI to the
 * peer, then retire this transient FLOGI node (re-init if SID=0, shutdown
 * if it was the DID=fabric FLOGI initiator).
 */
1119 __efc_p2p_wait_domain_attach(struct efc_sm_ctx *ctx,
1120 enum efc_sm_event evt, void *arg)
1122 struct efc_node *node = ctx->app;
1123 struct efc *efc = node->efc;
1125 efc_node_evt_set(ctx, evt, __func__);
1131 efc_node_hold_frames(node);
1135 efc_node_accept_frames(node);
1138 case EFC_EVT_DOMAIN_ATTACH_OK: {
1139 struct efc_nport *nport = node->nport;
1140 struct efc_node *rnode;
1143 * this transient node (SID=0 (recv'd FLOGI)
1144 * or DID=fabric (sent FLOGI))
1145 * is the p2p winner, will use a separate node
1146 * to send PLOGI to peer
1148 WARN_ON(!node->nport->p2p_winner);
1150 rnode = efc_node_find(nport, node->nport->p2p_remote_port_id);
1153 * the "other" transient p2p node has
1154 * already kicked off the
1155 * new node from which PLOGI is sent
1158 "Node with fc_id x%x already exists\n",
1159 rnode->rnode.fc_id);
1162 * create new node (SID=1, DID=2)
1163 * from which to send PLOGI
1165 rnode = efc_node_alloc(nport,
1166 nport->p2p_remote_port_id,
1169 efc_log_err(efc, "node alloc failed\n");
1173 efc_fabric_notify_topology(node);
1174 /* sm: / allocate p2p remote node */
1175 efc_node_transition(rnode, __efc_p2p_rnode_init,
1180 * the transient node (SID=0 or DID=fabric)
1181 * has served its purpose
1183 if (node->rnode.fc_id == 0) {
1185 * if this is the SID=0 node,
1186 * move to the init state in case peer
1187 * has restarted FLOGI discovery and FLOGI is pending
1189 /* don't send PLOGI on efc_d_init entry */
1190 efc_node_init_device(node, false);
1193 * if this is the DID=fabric node
1194 * (we initiated FLOGI), shut it down
1196 node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT;
1197 efc_fabric_initiate_shutdown(node);
1203 __efc_fabric_common(__func__, ctx, evt, arg);
/*
 * __efc_p2p_rnode_init() - initial state of the p2p remote node; sends
 * PLOGI to the peer and waits for the response. An ABTS received here is
 * answered with BA_ACC.
 */
1208 __efc_p2p_rnode_init(struct efc_sm_ctx *ctx,
1209 enum efc_sm_event evt, void *arg)
1211 struct efc_node_cb *cbdata = arg;
1212 struct efc_node *node = ctx->app;
1214 efc_node_evt_set(ctx, evt, __func__);
1220 /* sm: / send PLOGI */
1221 efc_send_plogi(node);
1222 efc_node_transition(node, __efc_p2p_wait_plogi_rsp, NULL);
1225 case EFC_EVT_ABTS_RCVD:
1226 /* sm: send BA_ACC */
1227 efc_send_bls_acc(node, cbdata->header->dma.virt);
1232 __efc_fabric_common(__func__, ctx, evt, arg);
/*
 * __efc_p2p_wait_flogi_acc_cmpl() - wait for the FLOGI LS_ACC send to
 * complete. If we are the p2p winner, attach the domain (or reuse an
 * existing attach); otherwise re-init this node to await the peer's PLOGI.
 * LS_ACC send failure shuts the node down.
 */
1237 __efc_p2p_wait_flogi_acc_cmpl(struct efc_sm_ctx *ctx,
1238 enum efc_sm_event evt, void *arg)
1240 struct efc_node_cb *cbdata = arg;
1241 struct efc_node *node = ctx->app;
1243 efc_node_evt_set(ctx, evt, __func__);
1249 efc_node_hold_frames(node);
1253 efc_node_accept_frames(node);
1256 case EFC_EVT_SRRS_ELS_CMPL_OK:
1257 WARN_ON(!node->els_cmpl_cnt);
1258 node->els_cmpl_cnt--;
1260 /* sm: if p2p_winner / domain_attach */
1261 if (node->nport->p2p_winner) {
1262 efc_node_transition(node,
1263 __efc_p2p_wait_domain_attach,
1265 if (!node->nport->domain->attached) {
1266 node_printf(node, "Domain not attached\n");
1267 efc_domain_attach(node->nport->domain,
1268 node->nport->p2p_port_id);
1270 node_printf(node, "Domain already attached\n");
1271 efc_node_post_event(node,
1272 EFC_EVT_DOMAIN_ATTACH_OK,
1276 /* this node has served its purpose;
1277 * we'll expect a PLOGI on a separate
1278 * node (remote SID=0x1); return this node
1279 * to init state in case peer
1280 * restarts discovery -- it may already
1281 * have (pending frames may exist).
1283 /* don't send PLOGI on efc_d_init entry */
1284 efc_node_init_device(node, false);
1288 case EFC_EVT_SRRS_ELS_CMPL_FAIL:
1290 * LS_ACC failed, possibly due to link down;
1291 * shutdown node and wait
1292 * for FLOGI discovery to restart
1294 node_printf(node, "FLOGI LS_ACC failed, shutting down\n");
1295 WARN_ON(!node->els_cmpl_cnt);
1296 node->els_cmpl_cnt--;
1297 node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT;
1298 efc_fabric_initiate_shutdown(node);
1301 case EFC_EVT_ABTS_RCVD: {
1302 /* sm: / send BA_ACC */
1303 efc_send_bls_acc(node, cbdata->header->dma.virt);
1308 __efc_fabric_common(__func__, ctx, evt, arg);
/*
 * __efc_p2p_wait_plogi_rsp() - wait for the p2p PLOGI response. On success
 * save sparams and attach the node; on failure shut down. A PRLI arriving
 * before the PLOGI response (out-of-order WCQE/RCQE) is saved so its LS_ACC
 * can be sent after attach. A received PLOGI is only acked directly in
 * external-loopback mode.
 */
1313 __efc_p2p_wait_plogi_rsp(struct efc_sm_ctx *ctx,
1314 enum efc_sm_event evt, void *arg)
1316 struct efc_node_cb *cbdata = arg;
1317 struct efc_node *node = ctx->app;
1319 efc_node_evt_set(ctx, evt, __func__);
1324 case EFC_EVT_SRRS_ELS_REQ_OK: {
1327 if (efc_node_check_els_req(ctx, evt, arg, ELS_PLOGI,
1328 __efc_fabric_common, __func__)) {
1331 WARN_ON(!node->els_req_cnt);
1332 node->els_req_cnt--;
1333 /* sm: / save sparams, efc_node_attach */
1334 efc_node_save_sparms(node, cbdata->els_rsp.virt);
1335 rc = efc_node_attach(node);
1336 efc_node_transition(node, __efc_p2p_wait_node_attach, NULL);
1338 efc_node_post_event(node, EFC_EVT_NODE_ATTACH_FAIL,
1342 case EFC_EVT_SRRS_ELS_REQ_FAIL: {
1343 if (efc_node_check_els_req(ctx, evt, arg, ELS_PLOGI,
1344 __efc_fabric_common, __func__)) {
1347 node_printf(node, "PLOGI failed, shutting down\n");
1348 WARN_ON(!node->els_req_cnt);
1349 node->els_req_cnt--;
1350 node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT;
1351 efc_fabric_initiate_shutdown(node);
1355 case EFC_EVT_PLOGI_RCVD: {
1356 struct fc_frame_header *hdr = cbdata->header->dma.virt;
1357 /* if we're in external loopback mode, just send LS_ACC */
1358 if (node->efc->external_loopback) {
1359 efc_send_plogi_acc(node, be16_to_cpu(hdr->fh_ox_id));
1362 * if this isn't external loopback,
1363 * pass to default handler
1365 __efc_fabric_common(__func__, ctx, evt, arg);
1369 case EFC_EVT_PRLI_RCVD:
1371 /* sent PLOGI and before completion was seen, received the
1372 * PRLI from the remote node (WCQEs and RCQEs come in on
1373 * different queues and order of processing cannot be assumed)
1374 * Save OXID so PRLI can be sent after the attach and continue
1375 * to wait for PLOGI response
1377 efc_process_prli_payload(node, cbdata->payload->dma.virt);
1378 efc_send_ls_acc_after_attach(node,
1379 cbdata->header->dma.virt,
1380 EFC_NODE_SEND_LS_ACC_PRLI);
1381 efc_node_transition(node, __efc_p2p_wait_plogi_rsp_recvd_prli,
1385 __efc_fabric_common(__func__, ctx, evt, arg);
/*
 * __efc_p2p_wait_plogi_rsp_recvd_prli() - a PRLI has already been received;
 * hold frames and wait only for the PLOGI response so the node can attach
 * and then send the deferred PRLI LS_ACC. PLOGI failure shuts the node down.
 */
1390 __efc_p2p_wait_plogi_rsp_recvd_prli(struct efc_sm_ctx *ctx,
1391 enum efc_sm_event evt, void *arg)
1393 struct efc_node_cb *cbdata = arg;
1394 struct efc_node *node = ctx->app;
1396 efc_node_evt_set(ctx, evt, __func__);
1403 * Since we've received a PRLI, we have a port login and will
1404 * just need to wait for the PLOGI response to do the node
1405 * attach and then we can send the LS_ACC for the PRLI. If,
1406 * during this time, we receive FCP_CMNDs (which is possible
1407 * since we've already sent a PRLI and our peer may have
1409 * At this time, we are not waiting on any other unsolicited
1410 * frames to continue with the login process. Thus, it will not
1411 * hurt to hold frames here.
1413 efc_node_hold_frames(node);
1417 efc_node_accept_frames(node);
1420 case EFC_EVT_SRRS_ELS_REQ_OK: { /* PLOGI response received */
1423 /* Completion from PLOGI sent */
1424 if (efc_node_check_els_req(ctx, evt, arg, ELS_PLOGI,
1425 __efc_fabric_common, __func__)) {
1428 WARN_ON(!node->els_req_cnt);
1429 node->els_req_cnt--;
1430 /* sm: / save sparams, efc_node_attach */
1431 efc_node_save_sparms(node, cbdata->els_rsp.virt);
1432 rc = efc_node_attach(node);
1433 efc_node_transition(node, __efc_p2p_wait_node_attach, NULL);
1435 efc_node_post_event(node, EFC_EVT_NODE_ATTACH_FAIL,
1439 case EFC_EVT_SRRS_ELS_REQ_FAIL: /* PLOGI response received */
1440 case EFC_EVT_SRRS_ELS_REQ_RJT:
1441 /* PLOGI failed, shutdown the node */
1442 if (efc_node_check_els_req(ctx, evt, arg, ELS_PLOGI,
1443 __efc_fabric_common, __func__)) {
1446 WARN_ON(!node->els_req_cnt);
1447 node->els_req_cnt--;
1448 node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT;
1449 efc_fabric_initiate_shutdown(node);
1453 __efc_fabric_common(__func__, ctx, evt, arg);
/*
 * __efc_p2p_wait_node_attach() - hold frames while the p2p node attach
 * completes. On success, send the deferred PRLI response (if one is
 * pending) or proceed to the logged-in device state; on failure or
 * SHUTDOWN, tear the node down. A PRLI received before attach completes is
 * saved for a post-attach LS_ACC.
 */
1458 __efc_p2p_wait_node_attach(struct efc_sm_ctx *ctx,
1459 enum efc_sm_event evt, void *arg)
1461 struct efc_node_cb *cbdata = arg;
1462 struct efc_node *node = ctx->app;
1464 efc_node_evt_set(ctx, evt, __func__);
1470 efc_node_hold_frames(node);
1474 efc_node_accept_frames(node);
1477 case EFC_EVT_NODE_ATTACH_OK:
1478 node->attached = true;
1479 switch (node->send_ls_acc) {
1480 case EFC_NODE_SEND_LS_ACC_PRLI: {
1481 efc_d_send_prli_rsp(node->ls_acc_io,
1483 node->send_ls_acc = EFC_NODE_SEND_LS_ACC_NONE;
1484 node->ls_acc_io = NULL;
1487 case EFC_NODE_SEND_LS_ACC_PLOGI: /* Can't happen in P2P */
1488 case EFC_NODE_SEND_LS_ACC_NONE:
1490 /* Normal case for I */
1491 /* sm: send_plogi_acc is not set / send PLOGI acc */
1492 efc_node_transition(node, __efc_d_port_logged_in,
1498 case EFC_EVT_NODE_ATTACH_FAIL:
1499 /* node attach failed, shutdown the node */
1500 node->attached = false;
1501 node_printf(node, "Node attach failed\n");
1502 node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT;
1503 efc_fabric_initiate_shutdown(node);
1506 case EFC_EVT_SHUTDOWN:
1507 node_printf(node, "%s received\n", efc_sm_event_name(evt));
1508 node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT;
1509 efc_node_transition(node,
1510 __efc_fabric_wait_attach_evt_shutdown,
1513 case EFC_EVT_PRLI_RCVD:
1514 node_printf(node, "%s: PRLI received before node is attached\n",
1515 efc_sm_event_name(evt));
1516 efc_process_prli_payload(node, cbdata->payload->dma.virt);
1517 efc_send_ls_acc_after_attach(node,
1518 cbdata->header->dma.virt,
1519 EFC_NODE_SEND_LS_ACC_PRLI);
1523 __efc_fabric_common(__func__, ctx, evt, arg);
1528 efc_p2p_setup(struct efc_nport *nport)
1530 struct efc *efc = nport->efc;
1533 rnode_winner = efc_rnode_is_winner(nport);
1535 /* set nport flags to indicate p2p "winner" */
1536 if (rnode_winner == 1) {
1537 nport->p2p_remote_port_id = 0;
1538 nport->p2p_port_id = 0;
1539 nport->p2p_winner = false;
1540 } else if (rnode_winner == 0) {
1541 nport->p2p_remote_port_id = 2;
1542 nport->p2p_port_id = 1;
1543 nport->p2p_winner = true;
1545 /* no winner; only okay if external loopback enabled */
1546 if (nport->efc->external_loopback) {
1548 * External loopback mode enabled;
1549 * local nport and remote node
1550 * will be registered with an NPortID = 1;
1553 "External loopback mode enabled\n");
1554 nport->p2p_remote_port_id = 1;
1555 nport->p2p_port_id = 1;
1556 nport->p2p_winner = true;
1559 "failed to determine p2p winner\n");
1560 return rnode_winner;