2 * This file is part of the Chelsio FCoE driver for Linux.
4 * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved.
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenIB.org BSD license below:
12 * Redistribution and use in source and binary forms, with or
13 * without modification, are permitted provided that the following
16 * - Redistributions of source code must retain the above
17 * copyright notice, this list of conditions and the following
20 * - Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution.
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
35 #include <linux/string.h>
36 #include <scsi/scsi_device.h>
37 #include <scsi/scsi_transport_fc.h>
38 #include <scsi/fc/fc_els.h>
39 #include <scsi/fc/fc_fs.h>
42 #include "csio_lnode.h"
43 #include "csio_rnode.h"
45 static int csio_rnode_init(struct csio_rnode *, struct csio_lnode *);
46 static void csio_rnode_exit(struct csio_rnode *);
48 /* Static machine forward declarations */
49 static void csio_rns_uninit(struct csio_rnode *, enum csio_rn_ev);
50 static void csio_rns_ready(struct csio_rnode *, enum csio_rn_ev);
51 static void csio_rns_offline(struct csio_rnode *, enum csio_rn_ev);
52 static void csio_rns_disappeared(struct csio_rnode *, enum csio_rn_ev);
54 /* RNF event mapping */
/*
 * Translation table from firmware rdev event codes (array index) to rnode
 * state-machine events.  Entries set to CSIO_RNFE_NONE are firmware events
 * the rnode SM takes no action on.
 * NOTE(review): listing is elided here - the closing "};" of this table and
 * the tail of the CSIO_FWE_TO_RNFE() macro are not visible; confirm against
 * the full source.
 */
55 static enum csio_rn_ev fwevt_to_rnevt[] = {
56 CSIO_RNFE_NONE, /* None */
57 CSIO_RNFE_LOGGED_IN, /* PLOGI_ACC_RCVD */
58 CSIO_RNFE_NONE, /* PLOGI_RJT_RCVD */
59 CSIO_RNFE_PLOGI_RECV, /* PLOGI_RCVD */
60 CSIO_RNFE_LOGO_RECV, /* PLOGO_RCVD */
61 CSIO_RNFE_PRLI_DONE, /* PRLI_ACC_RCVD */
62 CSIO_RNFE_NONE, /* PRLI_RJT_RCVD */
63 CSIO_RNFE_PRLI_RECV, /* PRLI_RCVD */
64 CSIO_RNFE_PRLO_RECV, /* PRLO_RCVD */
65 CSIO_RNFE_NONE, /* NPORT_ID_CHGD */
66 CSIO_RNFE_LOGO_RECV, /* FLOGO_RCVD */
67 CSIO_RNFE_NONE, /* CLR_VIRT_LNK_RCVD */
68 CSIO_RNFE_LOGGED_IN, /* FLOGI_ACC_RCVD */
69 CSIO_RNFE_NONE, /* FLOGI_RJT_RCVD */
70 CSIO_RNFE_LOGGED_IN, /* FDISC_ACC_RCVD */
71 CSIO_RNFE_NONE, /* FDISC_RJT_RCVD */
72 CSIO_RNFE_NONE, /* FLOGI_TMO_MAX_RETRY */
73 CSIO_RNFE_NONE, /* IMPL_LOGO_ADISC_ACC */
74 CSIO_RNFE_NONE, /* IMPL_LOGO_ADISC_RJT */
75 CSIO_RNFE_NONE, /* IMPL_LOGO_ADISC_CNFLT */
76 CSIO_RNFE_NONE, /* PRLI_TMO */
77 CSIO_RNFE_NONE, /* ADISC_TMO */
78 CSIO_RNFE_NAME_MISSING, /* RSCN_DEV_LOST */
79 CSIO_RNFE_NONE, /* SCR_ACC_RCVD */
80 CSIO_RNFE_NONE, /* ADISC_RJT_RCVD */
81 CSIO_RNFE_NONE, /* LOGO_SNT */
82 CSIO_RNFE_LOGO_RECV, /* PROTO_ERR_IMPL_LOGO */
/* Bounds-checked mapping from fw event to rnode event via the table above. */
85 #define CSIO_FWE_TO_RNFE(_evt) ((_evt > PROTO_ERR_IMPL_LOGO) ? \
/* Reports whether the rnode SM is currently in the csio_rns_ready state. */
89 csio_is_rnode_ready(struct csio_rnode *rn)
91 return csio_match_state(rn, csio_rns_ready);
/* Reports whether the rnode SM is currently in the csio_rns_uninit state. */
97 csio_is_rnode_uninit(struct csio_rnode *rn)
99 return csio_match_state(rn, csio_rns_uninit);
/*
 * True when @rport_type is a well-known-address (WKA) port type:
 * fabric VF ports (FLOGI/FDISC) or name-server/FDMI VN ports.
 * NOTE(review): the return statements are elided from this listing.
 */
101 csio_is_rnode_wka(uint8_t rport_type)
103 if ((rport_type == FLOGI_VFPORT) ||
104 (rport_type == FDISC_VFPORT) ||
105 (rport_type == NS_VNPORT) ||
106 (rport_type == FDMI_VNPORT))
113 * csio_rn_lookup - Finds the rnode with the given flowid
117 * Does the rnode lookup on the given lnode and flowid.If no matching entry
118 * found, NULL is returned.
/* Caller is expected to hold the hw lock while walking ln->rnhead. */
120 static struct csio_rnode *
121 csio_rn_lookup(struct csio_lnode *ln, uint32_t flowid)
123 struct csio_rnode *rnhead = (struct csio_rnode *) &ln->rnhead;
124 struct list_head *tmp;
125 struct csio_rnode *rn;
/* rnodes are linked through sm.sm_list, so the cast above is safe only
 * because sm is the first member - do not reorder struct csio_rnode. */
127 list_for_each(tmp, &rnhead->sm.sm_list) {
128 rn = (struct csio_rnode *) tmp;
129 if (rn->flowid == flowid)
137 * csio_rn_lookup_wwpn - Finds the rnode with the given wwpn
141 * Does the rnode lookup on the given lnode and wwpn. If no matching entry
142 * found, NULL is returned.
/* @wwpn must point at an 8-byte world-wide port name. */
144 static struct csio_rnode *
145 csio_rn_lookup_wwpn(struct csio_lnode *ln, uint8_t *wwpn)
147 struct csio_rnode *rnhead = (struct csio_rnode *) &ln->rnhead;
148 struct list_head *tmp;
149 struct csio_rnode *rn;
151 list_for_each(tmp, &rnhead->sm.sm_list) {
152 rn = (struct csio_rnode *) tmp;
/* WWPNs are compared as raw 8-byte big-endian names. */
153 if (!memcmp(csio_rn_wwpn(rn), wwpn, 8))
161 * csio_rnode_lookup_portid - Finds the rnode with the given portid
165 * Lookup the rnode list for a given portid. If no matching entry
166 * found, NULL is returned.
/* Matches on the FC N_Port ID (rn->nport_id), not the flowid. */
169 csio_rnode_lookup_portid(struct csio_lnode *ln, uint32_t portid)
171 struct csio_rnode *rnhead = (struct csio_rnode *) &ln->rnhead;
172 struct list_head *tmp;
173 struct csio_rnode *rn;
175 list_for_each(tmp, &rnhead->sm.sm_list) {
176 rn = (struct csio_rnode *) tmp;
177 if (rn->nport_id == portid)
/*
 * csio_rn_dup_flowid - Detect a duplicate flowid across all lnodes.
 * @ln: lnode the new rdev event arrived on
 * @rdev_flowid: flowid to check for duplicates
 * @vnp_flowid: out-param; on a match, set to the owning lnode's vnp flowid
 *
 * Walks every lnode on the hw and checks whether any *ready* rnode already
 * owns @rdev_flowid.  NOTE(review): the skip-self check and return paths are
 * elided from this listing; confirm against the full source.
 */
185 csio_rn_dup_flowid(struct csio_lnode *ln, uint32_t rdev_flowid,
186 uint32_t *vnp_flowid)
188 struct csio_rnode *rnhead;
189 struct list_head *tmp, *tmp1;
190 struct csio_rnode *rn;
191 struct csio_lnode *ln_tmp;
192 struct csio_hw *hw = csio_lnode_to_hw(ln);
194 list_for_each(tmp1, &hw->sln_head) {
195 ln_tmp = (struct csio_lnode *) tmp1;
199 rnhead = (struct csio_rnode *)&ln_tmp->rnhead;
200 list_for_each(tmp, &rnhead->sm.sm_list) {
202 rn = (struct csio_rnode *) tmp;
/* Only active (ready) rnodes count as duplicates. */
203 if (csio_is_rnode_ready(rn)) {
204 if (rn->flowid == rdev_flowid) {
205 *vnp_flowid = csio_ln_flowid(ln_tmp);
/*
 * csio_alloc_rnode - Allocate and initialize a new rnode from the hw mempool.
 * @ln: owning lnode
 *
 * GFP_ATOMIC because this runs in event/interrupt context with the hw lock
 * held.  On init failure the rnode is returned to the mempool and the
 * n_rnode_nomem stat is bumped.  Returns the new rnode, or NULL (elided).
 */
215 static struct csio_rnode *
216 csio_alloc_rnode(struct csio_lnode *ln)
218 struct csio_hw *hw = csio_lnode_to_hw(ln);
220 struct csio_rnode *rn = mempool_alloc(hw->rnode_mempool, GFP_ATOMIC);
224 memset(rn, 0, sizeof(struct csio_rnode));
225 if (csio_rnode_init(rn, ln))
228 CSIO_INC_STATS(ln, n_rnode_alloc);
233 mempool_free(rn, hw->rnode_mempool);
235 CSIO_INC_STATS(ln, n_rnode_nomem);
/*
 * csio_free_rnode - Tear down an rnode and return it to the hw mempool.
 * @rn: rnode to free
 */
240 csio_free_rnode(struct csio_rnode *rn)
242 struct csio_hw *hw = csio_lnode_to_hw(csio_rnode_to_lnode(rn));
245 CSIO_INC_STATS(rn->lnp, n_rnode_free);
246 mempool_free(rn, hw->rnode_mempool);
250 * csio_get_rnode - Gets rnode with the given flowid
254 * Does the rnode lookup on the given lnode and flowid. If no matching
255 * rnode found, then new rnode with given npid is allocated and returned.
/* Lookup-or-create helper; the alloc path is only taken on a lookup miss. */
257 static struct csio_rnode *
258 csio_get_rnode(struct csio_lnode *ln, uint32_t flowid)
260 struct csio_rnode *rn;
262 rn = csio_rn_lookup(ln, flowid);
264 rn = csio_alloc_rnode(ln);
275 * csio_put_rnode - Frees the given rnode
279 * Releases the rnode back to the hw mempool. The rnode must already be in
280 * the uninit state (asserted below) before it can be freed.
283 csio_put_rnode(struct csio_lnode *ln, struct csio_rnode *rn)
285 CSIO_DB_ASSERT(csio_is_rnode_uninit(rn) != 0);
290 * csio_confirm_rnode - confirms rnode based on wwpn.
292 * @rdev_flowid: remote device flowid
293 * @rdevp: remote device params
294 * This routines searches other rnode in list having same wwpn of new rnode.
295 * If there is a match, then matched rnode is returned and otherwise new rnode
/*
 * NOTE(review): heavily elided listing - several return/goto paths, the
 * "found"/"alloc" labels and some log-call heads are missing; all comments
 * below describe only the visible lines.
 */
300 csio_confirm_rnode(struct csio_lnode *ln, uint32_t rdev_flowid,
301 struct fcoe_rdev_entry *rdevp)
304 struct csio_rnode *rn, *match_rn;
/* r_id[] carries the 24-bit FC port id; read as big-endian 32-bit word. */
308 port_id = (__be32 *)&rdevp->r_id[0];
310 FW_RDEV_WR_RPORT_TYPE_GET(rdevp->rd_xfer_rdy_to_rport_type);
312 /* Drop rdev event for cntrl port */
313 if (rport_type == FAB_CTLR_VNPORT) {
315 "Unhandled rport_type:%d recv in rdev evt "
316 "ssni:x%x\n", rport_type, rdev_flowid);
320 /* Lookup on flowid */
321 rn = csio_rn_lookup(ln, rdev_flowid);
324 /* Drop events with duplicate flowid */
325 if (csio_rn_dup_flowid(ln, rdev_flowid, &vnp_flowid)) {
327 "ssni:%x already active on vnpi:%x",
328 rdev_flowid, vnp_flowid);
332 /* Lookup on wwpn for NPORTs */
333 rn = csio_rn_lookup_wwpn(ln, rdevp->wwpn);
338 /* Lookup well-known ports with nport id */
339 if (csio_is_rnode_wka(rport_type)) {
340 match_rn = csio_rnode_lookup_portid(ln,
341 ((ntohl(*port_id) >> 8) & CSIO_DID_MASK));
342 if (match_rn == NULL) {
/* Invalidate the stale flowid before re-using/re-allocating. */
343 csio_rn_flowid(rn) = CSIO_INVALID_IDX;
348 * Now compare the wwpn to confirm that
349 * same port relogged in. If so update the matched rn.
350 * Else, go ahead and alloc a new rnode.
352 if (!memcmp(csio_rn_wwpn(match_rn), rdevp->wwpn, 8)) {
353 if (csio_is_rnode_ready(rn)) {
360 csio_rn_flowid(rn) = CSIO_INVALID_IDX;
366 csio_rn_flowid(rn) = CSIO_INVALID_IDX;
/* Same wwpn on the flowid-matched rnode: the same port logged back in. */
371 if (!memcmp(csio_rn_wwpn(rn), rdevp->wwpn, 8))
374 /* Search for rnode that have same wwpn */
375 match_rn = csio_rn_lookup_wwpn(ln, rdevp->wwpn);
376 if (match_rn != NULL) {
378 "ssni:x%x changed for rport name(wwpn):%llx "
379 "did:x%x\n", rdev_flowid,
380 wwn_to_u64(rdevp->wwpn),
382 csio_rn_flowid(rn) = CSIO_INVALID_IDX;
386 "rnode wwpn mismatch found ssni:x%x "
389 wwn_to_u64(csio_rn_wwpn(rn)));
390 if (csio_is_rnode_ready(rn)) {
392 "rnode is already active "
393 "wwpn:%llx ssni:x%x\n",
394 wwn_to_u64(csio_rn_wwpn(rn)),
398 csio_rn_flowid(rn) = CSIO_INVALID_IDX;
404 csio_ln_dbg(ln, "found rnode:%p ssni:x%x name(wwpn):%llx\n",
405 rn, rdev_flowid, wwn_to_u64(rdevp->wwpn));
/* Matched path: bind the confirmed rnode to the new flowid/rdev entry. */
408 csio_rn_flowid(rn) = rdev_flowid;
410 /* update rdev entry */
411 rn->rdev_entry = rdevp;
412 CSIO_INC_STATS(ln, n_rnode_match);
/* Alloc path: no confirmable match - get (or create) a fresh rnode. */
416 rn = csio_get_rnode(ln, rdev_flowid);
420 csio_ln_dbg(ln, "alloc rnode:%p ssni:x%x name(wwpn):%llx\n",
421 rn, rdev_flowid, wwn_to_u64(rdevp->wwpn));
423 /* update rdev entry */
424 rn->rdev_entry = rdevp;
429 * csio_rn_verify_rparams - verify rparams.
432 * @rdevp: remote device params
433 * returns success if rparams are verified.
/*
 * Validates the firmware rdev entry against the rport type, sets rn->role
 * and PRLI fcp_flags, and copies wwnn/wwpn/nport_id/service params into the
 * rnode.  NOTE(review): case labels and return statements are elided from
 * this listing; confirm the switch structure against the full source.
 */
436 csio_rn_verify_rparams(struct csio_lnode *ln, struct csio_rnode *rn,
437 struct fcoe_rdev_entry *rdevp)
444 did = (__be32 *) &rdevp->r_id[0];
446 FW_RDEV_WR_RPORT_TYPE_GET(rdevp->rd_xfer_rdy_to_rport_type);
447 switch (rport_type) {
/* Fabric port: destination id must be the well-known FLOGI address. */
449 rn->role = CSIO_RNFR_FABRIC;
450 if (((ntohl(*did) >> 8) & CSIO_DID_MASK) != FC_FID_FLOGI) {
451 csio_ln_err(ln, "ssni:x%x invalid fabric portid\n",
456 if (FW_RDEV_WR_NPIV_GET(rdevp->vft_to_qos))
457 ln->flags |= CSIO_LNF_NPIVSUPP;
/* Name server: destination id must be the directory-server address. */
462 rn->role = CSIO_RNFR_NS;
463 if (((ntohl(*did) >> 8) & CSIO_DID_MASK) != FC_FID_DIR_SERV) {
464 csio_ln_err(ln, "ssni:x%x invalid fabric portid\n",
/* Regular N_Port: derive FCP flags and target/initiator role from the
 * PRLI service parameters reported by firmware. */
472 rn->role = CSIO_RNFR_NPORT;
473 if (rdevp->event_cause == PRLI_ACC_RCVD ||
474 rdevp->event_cause == PRLI_RCVD) {
475 if (FW_RDEV_WR_TASK_RETRY_ID_GET(
476 rdevp->enh_disc_to_tgt))
477 rn->fcp_flags |= FCP_SPPF_OVLY_ALLOW;
479 if (FW_RDEV_WR_RETRY_GET(rdevp->enh_disc_to_tgt))
480 rn->fcp_flags |= FCP_SPPF_RETRY;
482 if (FW_RDEV_WR_CONF_CMPL_GET(rdevp->enh_disc_to_tgt))
483 rn->fcp_flags |= FCP_SPPF_CONF_COMPL;
485 if (FW_RDEV_WR_TGT_GET(rdevp->enh_disc_to_tgt))
486 rn->role |= CSIO_RNFR_TARGET;
488 if (FW_RDEV_WR_INI_GET(rdevp->enh_disc_to_tgt))
489 rn->role |= CSIO_RNFR_INITIATOR;
495 case FAB_CTLR_VNPORT:
500 csio_ln_err(ln, "ssni:x%x invalid rport type recv x%x\n",
501 csio_rn_flowid(rn), rport_type);
505 /* validate wwpn/wwnn for Name server/remote port */
506 if (rport_type == REG_VNPORT || rport_type == NS_VNPORT) {
/* All-zero names from firmware are rejected as invalid. */
508 if (!memcmp(rdevp->wwnn, null, 8)) {
510 "ssni:x%x invalid wwnn received from"
513 (ntohl(*did) & CSIO_DID_MASK));
517 if (!memcmp(rdevp->wwpn, null, 8)) {
519 "ssni:x%x invalid wwpn received from"
522 (ntohl(*did) & CSIO_DID_MASK));
528 /* Copy wwnn, wwpn and nport id */
529 rn->nport_id = (ntohl(*did) >> 8) & CSIO_DID_MASK;
530 memcpy(csio_rn_wwnn(rn), rdevp->wwnn, 8);
531 memcpy(csio_rn_wwpn(rn), rdevp->wwpn, 8);
532 rn->rn_sparm.csp.sp_bb_data = rdevp->rcv_fr_sz;
533 fc_class = FW_RDEV_WR_CLASS_GET(rdevp->vft_to_qos);
/* fc_class is 1-based; clsp[] is 0-based - hence the "- 1". */
534 rn->rn_sparm.clsp[fc_class - 1].cp_class = htons(FC_CPC_VALID);
/*
 * __csio_reg_rnode - Register an rnode with the upper layers.
 * @rn: rnode to register
 *
 * Called with the hw lock held; the lock is dropped around the (elided)
 * registration call because it can sleep.  Target rnodes additionally kick
 * off a scan / FDMI as shown below.
 */
540 __csio_reg_rnode(struct csio_rnode *rn)
542 struct csio_lnode *ln = csio_rnode_to_lnode(rn);
543 struct csio_hw *hw = csio_lnode_to_hw(ln);
545 spin_unlock_irq(&hw->lock);
547 spin_lock_irq(&hw->lock);
549 if (rn->role & CSIO_RNFR_TARGET)
/* Management-server port: start FDMI on the owning lnode. */
552 if (rn->nport_id == FC_FID_MGMT_SERV)
553 csio_ln_fdmi_start(ln, (void *) rn);
/*
 * __csio_unreg_rnode - Unregister an rnode from the upper layers.
 * @rn: rnode to unregister
 *
 * Called with the hw lock held.  Pending I/Os on rn->host_cmpl_q are moved
 * to a local list first, the lock is dropped around csio_unreg_rnode()
 * (which can sleep), and the queued I/Os are then cleaned up.
 */
557 __csio_unreg_rnode(struct csio_rnode *rn)
559 struct csio_lnode *ln = csio_rnode_to_lnode(rn);
560 struct csio_hw *hw = csio_lnode_to_hw(ln);
564 if (!list_empty(&rn->host_cmpl_q)) {
565 csio_dbg(hw, "Returning completion queue I/Os\n");
566 list_splice_tail_init(&rn->host_cmpl_q, &tmp_q);
/* Losing a target port decrements the lnode's last-scan target count. */
570 if (rn->role & CSIO_RNFR_TARGET) {
572 ln->last_scan_ntgts--;
575 spin_unlock_irq(&hw->lock);
576 csio_unreg_rnode(rn);
577 spin_lock_irq(&hw->lock);
579 /* Cleanup I/Os that were waiting for rnode to unregister */
581 csio_scsi_cleanup_io_q(csio_hw_to_scsim(hw), &tmp_q);
585 /*****************************************************************************/
586 /* START: Rnode SM */
587 /*****************************************************************************/
/*
 * csio_rns_uninit - SM handler for the uninit state.
 * @rn: rnode
 * @evt: rnode event to process
 *
 * LOGGED_IN/PLOGI_RECV with valid rparams moves the rnode to ready and
 * registers it; LOGO_RECV is dropped; anything else is counted unexpected.
 */
596 csio_rns_uninit(struct csio_rnode *rn, enum csio_rn_ev evt)
598 struct csio_lnode *ln = csio_rnode_to_lnode(rn);
601 CSIO_INC_STATS(rn, n_evt_sm[evt]);
604 case CSIO_RNFE_LOGGED_IN:
605 case CSIO_RNFE_PLOGI_RECV:
606 ret = csio_rn_verify_rparams(ln, rn, rn->rdev_entry);
608 csio_set_state(&rn->sm, csio_rns_ready);
609 __csio_reg_rnode(rn);
611 CSIO_INC_STATS(rn, n_err_inval);
614 case CSIO_RNFE_LOGO_RECV:
616 "ssni:x%x Ignoring event %d recv "
617 "in rn state[uninit]\n", csio_rn_flowid(rn), evt);
618 CSIO_INC_STATS(rn, n_evt_drop);
622 "ssni:x%x unexp event %d recv "
623 "in rn state[uninit]\n", csio_rn_flowid(rn), evt);
624 CSIO_INC_STATS(rn, n_evt_unexp);
/*
 * csio_rns_ready - SM handler for the ready (logged-in) state.
 * @rn: rnode
 * @evt: rnode event to process
 *
 * Re-login events are dropped; PRLI completion re-verifies and re-registers;
 * logout/close/name-missing events unregister and transition to
 * offline/uninit/disappeared respectively.
 */
636 csio_rns_ready(struct csio_rnode *rn, enum csio_rn_ev evt)
638 struct csio_lnode *ln = csio_rnode_to_lnode(rn);
641 CSIO_INC_STATS(rn, n_evt_sm[evt]);
644 case CSIO_RNFE_LOGGED_IN:
645 case CSIO_RNFE_PLOGI_RECV:
647 "ssni:x%x Ignoring event %d recv from did:x%x "
648 "in rn state[ready]\n", csio_rn_flowid(rn), evt,
650 CSIO_INC_STATS(rn, n_evt_drop);
653 case CSIO_RNFE_PRLI_DONE:
654 case CSIO_RNFE_PRLI_RECV:
655 ret = csio_rn_verify_rparams(ln, rn, rn->rdev_entry);
657 __csio_reg_rnode(rn);
659 CSIO_INC_STATS(rn, n_err_inval);
663 csio_set_state(&rn->sm, csio_rns_offline);
664 __csio_unreg_rnode(rn);
666 /* FW expected to internally aborted outstanding SCSI WRs
667 * and return all SCSI WRs to host with status "ABORTED".
671 case CSIO_RNFE_LOGO_RECV:
672 csio_set_state(&rn->sm, csio_rns_offline);
674 __csio_unreg_rnode(rn);
676 /* FW expected to internally aborted outstanding SCSI WRs
677 * and return all SCSI WRs to host with status "ABORTED".
681 case CSIO_RNFE_CLOSE:
683 * Each rnode receives CLOSE event when driver is removed or
685 * Note: All outstanding IOs on remote port need to returned
686 * to uppper layer with appropriate error before sending
689 csio_set_state(&rn->sm, csio_rns_uninit);
690 __csio_unreg_rnode(rn);
693 case CSIO_RNFE_NAME_MISSING:
694 csio_set_state(&rn->sm, csio_rns_disappeared);
695 __csio_unreg_rnode(rn);
698 * FW expected to internally aborted outstanding SCSI WRs
699 * and return all SCSI WRs to host with status "ABORTED".
/* NOTE(review): log text below says "state[uninit]" although this is the
 * ready-state handler - looks like a copy/paste slip; confirm upstream. */
706 "ssni:x%x unexp event %d recv from did:x%x "
707 "in rn state[uninit]\n", csio_rn_flowid(rn), evt,
709 CSIO_INC_STATS(rn, n_evt_unexp);
/*
 * csio_rns_offline - SM handler for the offline (logged-out) state.
 * @rn: rnode
 * @evt: rnode event to process
 *
 * A successful re-login returns the rnode to ready; rparam failure posts
 * CLOSE; CLOSE/NAME_MISSING move to uninit/disappeared.
 */
721 csio_rns_offline(struct csio_rnode *rn, enum csio_rn_ev evt)
723 struct csio_lnode *ln = csio_rnode_to_lnode(rn);
726 CSIO_INC_STATS(rn, n_evt_sm[evt]);
729 case CSIO_RNFE_LOGGED_IN:
730 case CSIO_RNFE_PLOGI_RECV:
731 ret = csio_rn_verify_rparams(ln, rn, rn->rdev_entry);
733 csio_set_state(&rn->sm, csio_rns_ready);
734 __csio_reg_rnode(rn);
736 CSIO_INC_STATS(rn, n_err_inval);
/* Invalid rparams: tear the rnode down via a self-posted CLOSE. */
737 csio_post_event(&rn->sm, CSIO_RNFE_CLOSE);
743 "ssni:x%x Ignoring event %d recv from did:x%x "
744 "in rn state[offline]\n", csio_rn_flowid(rn), evt,
746 CSIO_INC_STATS(rn, n_evt_drop);
749 case CSIO_RNFE_CLOSE:
750 /* Each rnode receives CLOSE event when driver is removed or
752 * Note: All outstanding IOs on remote port need to returned
753 * to uppper layer with appropriate error before sending
756 csio_set_state(&rn->sm, csio_rns_uninit);
759 case CSIO_RNFE_NAME_MISSING:
760 csio_set_state(&rn->sm, csio_rns_disappeared);
765 "ssni:x%x unexp event %d recv from did:x%x "
766 "in rn state[offline]\n", csio_rn_flowid(rn), evt,
768 CSIO_INC_STATS(rn, n_evt_unexp);
774 * csio_rns_disappeared -
/*
 * SM handler for the disappeared state (device reported lost via RSCN).
 * A re-login with valid rparams brings the rnode back to ready; repeated
 * NAME_MISSING events are ignored; CLOSE moves to uninit.
 */
780 csio_rns_disappeared(struct csio_rnode *rn, enum csio_rn_ev evt)
782 struct csio_lnode *ln = csio_rnode_to_lnode(rn);
785 CSIO_INC_STATS(rn, n_evt_sm[evt]);
788 case CSIO_RNFE_LOGGED_IN:
789 case CSIO_RNFE_PLOGI_RECV:
790 ret = csio_rn_verify_rparams(ln, rn, rn->rdev_entry);
792 csio_set_state(&rn->sm, csio_rns_ready);
793 __csio_reg_rnode(rn);
795 CSIO_INC_STATS(rn, n_err_inval);
796 csio_post_event(&rn->sm, CSIO_RNFE_CLOSE);
800 case CSIO_RNFE_CLOSE:
801 /* Each rnode receives CLOSE event when driver is removed or
803 * Note: All outstanding IOs on remote port need to returned
804 * to uppper layer with appropriate error before sending
807 csio_set_state(&rn->sm, csio_rns_uninit);
811 case CSIO_RNFE_NAME_MISSING:
813 "ssni:x%x Ignoring event %d recv from did x%x"
814 "in rn state[disappeared]\n", csio_rn_flowid(rn),
820 "ssni:x%x unexp event %d recv from did x%x"
821 "in rn state[disappeared]\n", csio_rn_flowid(rn),
823 CSIO_INC_STATS(rn, n_evt_unexp);
828 /*****************************************************************************/
830 /*****************************************************************************/
833 * csio_rnode_devloss_handler - Device loss event handler
836 * Post event to close rnode SM and free rnode.
/* Invoked when the dev-loss timer for a remote port fires. */
839 csio_rnode_devloss_handler(struct csio_rnode *rn)
841 struct csio_lnode *ln = csio_rnode_to_lnode(rn);
843 /* ignore if same rnode came back as online */
844 if (csio_is_rnode_ready(rn))
847 csio_post_event(&rn->sm, CSIO_RNFE_CLOSE);
849 /* Free rn if in uninit state */
850 if (csio_is_rnode_uninit(rn))
851 csio_put_rnode(ln, rn);
855 * csio_rnode_fwevt_handler - Event handler for firmware rnode events.
/*
 * @rn: rnode the firmware event is for
 * @fwevt: raw firmware rdev event code
 *
 * Translates the fw event via CSIO_FWE_TO_RNFE(), drops unmapped events,
 * records per-event stats and event history, posts the event to the rnode
 * SM, and frees the rnode if the SM ended up in uninit.
 */
860 csio_rnode_fwevt_handler(struct csio_rnode *rn, uint8_t fwevt)
862 struct csio_lnode *ln = csio_rnode_to_lnode(rn);
865 evt = CSIO_FWE_TO_RNFE(fwevt);
867 csio_ln_err(ln, "ssni:x%x Unhandled FW Rdev event: %d\n",
868 csio_rn_flowid(rn), fwevt);
869 CSIO_INC_STATS(rn, n_evt_unexp);
872 CSIO_INC_STATS(rn, n_evt_fw[fwevt]);
874 /* Track previous & current events for debugging */
875 rn->prev_evt = rn->cur_evt;
878 /* Post event to rnode SM */
879 csio_post_event(&rn->sm, evt);
881 /* Free rn if in uninit state */
882 if (csio_is_rnode_uninit(rn))
883 csio_put_rnode(ln, rn);
887 * csio_rnode_init - Initialize rnode.
889 * @ln: Associated lnode
891 * Caller is responsible for holding the lock. The lock is required
892 * to be held for inserting the rnode in ln->rnhead list.
/* Sets the back-pointer, starts the SM in uninit, and links into rnhead. */
895 csio_rnode_init(struct csio_rnode *rn, struct csio_lnode *ln)
897 csio_rnode_to_lnode(rn) = ln;
898 csio_init_state(&rn->sm, csio_rns_uninit);
899 INIT_LIST_HEAD(&rn->host_cmpl_q);
/* No firmware flowid yet - marked invalid until an rdev event binds one. */
900 csio_rn_flowid(rn) = CSIO_INVALID_IDX;
902 /* Add rnode to list of lnodes->rnhead */
903 list_add_tail(&rn->sm.sm_list, &ln->rnhead);
/*
 * csio_rnode_exit - Unlink the rnode from its lnode's list.
 * @rn: rnode being torn down
 *
 * Asserts that no I/Os remain queued on host_cmpl_q.
 */
909 csio_rnode_exit(struct csio_rnode *rn)
911 list_del_init(&rn->sm.sm_list);
912 CSIO_DB_ASSERT(list_empty(&rn->host_cmpl_q));