// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2016 Avago Technologies.  All rights reserved.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/parser.h>
#include <uapi/scsi/fc/fc_fs.h>
#include <uapi/scsi/fc/fc_els.h>
#include <linux/delay.h>
#include <linux/overflow.h>
#include <linux/blk-cgroup.h>
#include "nvme.h"
#include "fabrics.h"
#include <linux/nvme-fc-driver.h>
#include <linux/nvme-fc.h>
#include "fc.h"
#include <scsi/scsi_transport_fc.h>
/* *************************** Data Structures/Defines ****************** */
enum nvme_fc_queue_flags {
	NVME_FC_Q_CONNECTED = 0,
	NVME_FC_Q_LIVE,
};
#define NVME_FC_DEFAULT_DEV_LOSS_TMO	60	/* seconds */

#define NVME_FC_DEFAULT_RECONNECT_TMO	2	/* delay between reconnects
						 * when connected and a
						 * connection failure
						 */
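/*
 * Illustrative arithmetic (not from the spec): reconnect attempts
 * continue until ctlr_loss_tmo (max_retries * reconnect delay) or the
 * remoteport's dev_loss_tmo expires; e.g. a 2 second delay within a
 * 60 second ctlr_loss_tmo allows roughly 30 attempts.
 */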
struct nvme_fc_queue {
	struct nvme_fc_ctrl	*ctrl;
	struct device		*dev;
	struct blk_mq_hw_ctx	*hctx;
	void			*lldd_handle;
	size_t			cmnd_capsule_len;
	u32			qnum;
	u32			rqcnt;
	u64			connection_id;
	atomic_t		csn;
	unsigned long		flags;
} __aligned(sizeof(u64));	/* alignment for other things alloc'd with */
enum nvme_fcop_flags {
	FCOP_FLAGS_TERMIO	= (1 << 0),
	FCOP_FLAGS_AEN		= (1 << 1),
};
struct nvmefc_ls_req_op {
	struct nvmefc_ls_req	ls_req;

	struct nvme_fc_rport	*rport;
	struct nvme_fc_queue	*queue;
	u32			flags;

	int			ls_error;
	struct completion	ls_done;
	struct list_head	lsreq_list;	/* rport->ls_req_list */
	bool			req_queued;
} __aligned(sizeof(u64));	/* alignment for other things alloc'd with */
struct nvmefc_ls_rcv_op {
	struct nvme_fc_rport		*rport;
	struct nvmefc_ls_rsp		*lsrsp;
	union nvmefc_ls_requests	*rqstbuf;
	union nvmefc_ls_responses	*rspbuf;
	u16				rqstdatalen;
	bool				handled;
	dma_addr_t			rspdma;
	struct list_head		lsrcv_list;	/* rport->ls_rcv_list */
} __aligned(sizeof(u64));	/* alignment for other things alloc'd with */
enum nvme_fcpop_state {
	FCPOP_STATE_UNINIT	= 0,
	FCPOP_STATE_IDLE	= 1,
	FCPOP_STATE_ACTIVE	= 2,
	FCPOP_STATE_ABORTED	= 3,
	FCPOP_STATE_COMPLETE	= 4,
};
struct nvme_fc_fcp_op {
	struct nvme_request	nreq;		/*
						 * nvme/host/core.c
						 * requires this to be
						 * the 1st element in the
						 * private structure
						 * associated with the
						 * request.
						 */
	struct nvmefc_fcp_req	fcp_req;

	struct nvme_fc_ctrl	*ctrl;
	struct nvme_fc_queue	*queue;
	struct request		*rq;

	atomic_t		state;
	u32			flags;
	u32			rqno;

	struct nvme_fc_cmd_iu	cmd_iu;
	struct nvme_fc_ersp_iu	rsp_iu;
};
struct nvme_fcp_op_w_sgl {
	struct nvme_fc_fcp_op	op;
	struct scatterlist	sgl[NVME_INLINE_SG_CNT];
	uint8_t			priv[];
};
struct nvme_fc_lport {
	struct nvme_fc_local_port	localport;

	struct ida			endp_cnt;
	struct list_head		port_list;	/* nvme_fc_lport_list */
	struct list_head		endp_list;
	struct device			*dev;	/* physical device for dma */
	struct nvme_fc_port_template	*ops;
	struct kref			ref;
	atomic_t			act_rport_cnt;
} __aligned(sizeof(u64));	/* alignment for other things alloc'd with */
struct nvme_fc_rport {
	struct nvme_fc_remote_port	remoteport;

	struct list_head		endp_list; /* for lport->endp_list */
	struct list_head		ctrl_list;
	struct list_head		ls_req_list;
	struct list_head		ls_rcv_list;
	struct list_head		disc_list;
	struct device			*dev;	/* physical device for dma */
	struct nvme_fc_lport		*lport;
	spinlock_t			lock;
	struct kref			ref;
	atomic_t			act_ctrl_cnt;
	unsigned long			dev_loss_end;
	struct work_struct		lsrcv_work;
} __aligned(sizeof(u64));	/* alignment for other things alloc'd with */
/* fc_ctrl flags values - specified as bit positions */
#define ASSOC_ACTIVE		0
#define ASSOC_FAILED		1
#define FCCTRL_TERMIO		2
struct nvme_fc_ctrl {
	spinlock_t		lock;
	struct nvme_fc_queue	*queues;
	struct device		*dev;
	struct nvme_fc_lport	*lport;
	struct nvme_fc_rport	*rport;
	u32			cnum;

	bool			ioq_live;
	u64			association_id;
	struct nvmefc_ls_rcv_op	*rcv_disconn;

	struct list_head	ctrl_list;	/* rport->ctrl_list */

	struct blk_mq_tag_set	admin_tag_set;
	struct blk_mq_tag_set	tag_set;

	struct work_struct	ioerr_work;
	struct delayed_work	connect_work;

	struct kref		ref;
	unsigned long		flags;
	u32			iocnt;
	wait_queue_head_t	ioabort_wait;

	struct nvme_fc_fcp_op	aen_ops[NVME_NR_AEN_COMMANDS];

	struct nvme_ctrl	ctrl;
};
static inline struct nvme_fc_ctrl *
to_fc_ctrl(struct nvme_ctrl *ctrl)
{
	return container_of(ctrl, struct nvme_fc_ctrl, ctrl);
}

static inline struct nvme_fc_lport *
localport_to_lport(struct nvme_fc_local_port *portptr)
{
	return container_of(portptr, struct nvme_fc_lport, localport);
}

static inline struct nvme_fc_rport *
remoteport_to_rport(struct nvme_fc_remote_port *portptr)
{
	return container_of(portptr, struct nvme_fc_rport, remoteport);
}

static inline struct nvmefc_ls_req_op *
ls_req_to_lsop(struct nvmefc_ls_req *lsreq)
{
	return container_of(lsreq, struct nvmefc_ls_req_op, ls_req);
}

static inline struct nvme_fc_fcp_op *
fcp_req_to_fcp_op(struct nvmefc_fcp_req *fcpreq)
{
	return container_of(fcpreq, struct nvme_fc_fcp_op, fcp_req);
}
/* *************************** Globals **************************** */


static DEFINE_SPINLOCK(nvme_fc_lock);

static LIST_HEAD(nvme_fc_lport_list);
static DEFINE_IDA(nvme_fc_local_port_cnt);
static DEFINE_IDA(nvme_fc_ctrl_cnt);

static struct workqueue_struct *nvme_fc_wq;

static bool nvme_fc_waiting_to_unload;
static DECLARE_COMPLETION(nvme_fc_unload_proceed);

/*
 * These items are short-term. They will eventually be moved into
 * a generic FC class. See comments in module init.
 */
static struct device *fc_udev_device;

static void nvme_fc_complete_rq(struct request *rq);
/* *********************** FC-NVME Port Management ************************ */

static void __nvme_fc_delete_hw_queue(struct nvme_fc_ctrl *,
			struct nvme_fc_queue *, unsigned int);

static void nvme_fc_handle_ls_rqst_work(struct work_struct *work);
static void
nvme_fc_free_lport(struct kref *ref)
{
	struct nvme_fc_lport *lport =
		container_of(ref, struct nvme_fc_lport, ref);
	unsigned long flags;

	WARN_ON(lport->localport.port_state != FC_OBJSTATE_DELETED);
	WARN_ON(!list_empty(&lport->endp_list));

	/* remove from transport list */
	spin_lock_irqsave(&nvme_fc_lock, flags);
	list_del(&lport->port_list);
	if (nvme_fc_waiting_to_unload && list_empty(&nvme_fc_lport_list))
		complete(&nvme_fc_unload_proceed);
	spin_unlock_irqrestore(&nvme_fc_lock, flags);

	ida_simple_remove(&nvme_fc_local_port_cnt, lport->localport.port_num);
	ida_destroy(&lport->endp_cnt);

	put_device(lport->dev);

	kfree(lport);
}
static void
nvme_fc_lport_put(struct nvme_fc_lport *lport)
{
	kref_put(&lport->ref, nvme_fc_free_lport);
}

static int
nvme_fc_lport_get(struct nvme_fc_lport *lport)
{
	return kref_get_unless_zero(&lport->ref);
}
static struct nvme_fc_lport *
nvme_fc_attach_to_unreg_lport(struct nvme_fc_port_info *pinfo,
			struct nvme_fc_port_template *ops,
			struct device *dev)
{
	struct nvme_fc_lport *lport;
	unsigned long flags;

	spin_lock_irqsave(&nvme_fc_lock, flags);

	list_for_each_entry(lport, &nvme_fc_lport_list, port_list) {
		if (lport->localport.node_name != pinfo->node_name ||
		    lport->localport.port_name != pinfo->port_name)
			continue;

		if (lport->dev != dev) {
			lport = ERR_PTR(-EXDEV);
			goto out_done;
		}

		if (lport->localport.port_state != FC_OBJSTATE_DELETED) {
			lport = ERR_PTR(-EEXIST);
			goto out_done;
		}

		if (!nvme_fc_lport_get(lport)) {
			/*
			 * fails if ref cnt already 0. If so,
			 * act as if lport already deleted
			 */
			lport = NULL;
			goto out_done;
		}

		/* resume the lport */

		lport->ops = ops;
		lport->localport.port_role = pinfo->port_role;
		lport->localport.port_id = pinfo->port_id;
		lport->localport.port_state = FC_OBJSTATE_ONLINE;

		spin_unlock_irqrestore(&nvme_fc_lock, flags);

		return lport;
	}

	lport = NULL;

out_done:
	spin_unlock_irqrestore(&nvme_fc_lock, flags);

	return lport;
}
/**
 * nvme_fc_register_localport - transport entry point called by an
 *                              LLDD to register the existence of a NVME
 *                              host FC port.
 * @pinfo:     pointer to information about the port to be registered
 * @template:  LLDD entrypoints and operational parameters for the port
 * @dev:       physical hardware device node port corresponds to. Will be
 *             used for DMA mappings
 * @portptr:   pointer to a local port pointer. Upon success, the routine
 *             will allocate a nvme_fc_local_port structure and place its
 *             address in the local port pointer. Upon failure, local port
 *             pointer will be set to 0.
 *
 * Returns:
 * a completion status. Must be 0 upon success; a negative errno
 * (ex: -ENXIO) upon failure.
 */
int
nvme_fc_register_localport(struct nvme_fc_port_info *pinfo,
			struct nvme_fc_port_template *template,
			struct device *dev,
			struct nvme_fc_local_port **portptr)
{
	struct nvme_fc_lport *newrec;
	unsigned long flags;
	int ret, idx;

	if (!template->localport_delete || !template->remoteport_delete ||
	    !template->ls_req || !template->fcp_io ||
	    !template->ls_abort || !template->fcp_abort ||
	    !template->max_hw_queues || !template->max_sgl_segments ||
	    !template->max_dif_sgl_segments || !template->dma_boundary) {
		ret = -EINVAL;
		goto out_reghost_failed;
	}

	/*
	 * look to see if there is already a localport that had been
	 * deregistered and in the process of waiting for all the
	 * references to fully be removed.  If the references haven't
	 * expired, we can simply re-enable the localport. Remoteports
	 * and controller reconnections should resume naturally.
	 */
	newrec = nvme_fc_attach_to_unreg_lport(pinfo, template, dev);

	/* found an lport, but something about its state is bad */
	if (IS_ERR(newrec)) {
		ret = PTR_ERR(newrec);
		goto out_reghost_failed;

	/* found existing lport, which was resumed */
	} else if (newrec) {
		*portptr = &newrec->localport;
		return 0;
	}

	/* nothing found - allocate a new localport struct */

	newrec = kmalloc((sizeof(*newrec) + template->local_priv_sz),
			 GFP_KERNEL);
	if (!newrec) {
		ret = -ENOMEM;
		goto out_reghost_failed;
	}

	idx = ida_simple_get(&nvme_fc_local_port_cnt, 0, 0, GFP_KERNEL);
	if (idx < 0) {
		ret = -ENOSPC;
		goto out_fail_kfree;
	}

	if (!get_device(dev) && dev) {
		ret = -ENODEV;
		goto out_ida_put;
	}

	INIT_LIST_HEAD(&newrec->port_list);
	INIT_LIST_HEAD(&newrec->endp_list);
	kref_init(&newrec->ref);
	atomic_set(&newrec->act_rport_cnt, 0);
	newrec->ops = template;
	newrec->dev = dev;
	ida_init(&newrec->endp_cnt);
	if (template->local_priv_sz)
		newrec->localport.private = &newrec[1];
	else
		newrec->localport.private = NULL;
	newrec->localport.node_name = pinfo->node_name;
	newrec->localport.port_name = pinfo->port_name;
	newrec->localport.port_role = pinfo->port_role;
	newrec->localport.port_id = pinfo->port_id;
	newrec->localport.port_state = FC_OBJSTATE_ONLINE;
	newrec->localport.port_num = idx;

	spin_lock_irqsave(&nvme_fc_lock, flags);
	list_add_tail(&newrec->port_list, &nvme_fc_lport_list);
	spin_unlock_irqrestore(&nvme_fc_lock, flags);

	if (dev)
		dma_set_seg_boundary(dev, template->dma_boundary);

	*portptr = &newrec->localport;
	return 0;

out_ida_put:
	ida_simple_remove(&nvme_fc_local_port_cnt, idx);
out_fail_kfree:
	kfree(newrec);
out_reghost_failed:
	*portptr = NULL;
	return ret;
}
EXPORT_SYMBOL_GPL(nvme_fc_register_localport);
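/*
 * Sketch of an LLDD registration call (hypothetical WWNs and template
 * name; error handling elided):
 *
 *	struct nvme_fc_port_info pinfo = {
 *		.node_name = 0x20000090fa942779ULL,
 *		.port_name = 0x10000090fa942779ULL,
 *		.port_role = FC_PORT_ROLE_NVME_INITIATOR,
 *	};
 *	struct nvme_fc_local_port *localport;
 *
 *	ret = nvme_fc_register_localport(&pinfo, &lldd_fc_template,
 *					 &pdev->dev, &localport);
 */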
/**
 * nvme_fc_unregister_localport - transport entry point called by an
 *                              LLDD to deregister/remove a previously
 *                              registered NVME host FC port.
 * @portptr: pointer to the (registered) local port that is to be
 *           deregistered.
 *
 * Returns:
 * a completion status. Must be 0 upon success; a negative errno
 * (ex: -ENXIO) upon failure.
 */
int
nvme_fc_unregister_localport(struct nvme_fc_local_port *portptr)
{
	struct nvme_fc_lport *lport = localport_to_lport(portptr);
	unsigned long flags;

	if (!portptr)
		return -EINVAL;

	spin_lock_irqsave(&nvme_fc_lock, flags);

	if (portptr->port_state != FC_OBJSTATE_ONLINE) {
		spin_unlock_irqrestore(&nvme_fc_lock, flags);
		return -EINVAL;
	}
	portptr->port_state = FC_OBJSTATE_DELETED;

	spin_unlock_irqrestore(&nvme_fc_lock, flags);

	if (atomic_read(&lport->act_rport_cnt) == 0)
		lport->ops->localport_delete(&lport->localport);

	nvme_fc_lport_put(lport);

	return 0;
}
EXPORT_SYMBOL_GPL(nvme_fc_unregister_localport);
/*
 * TRADDR strings, per FC-NVME are fixed format:
 *     "nn-0x<16hexdigits>:pn-0x<16hexdigits>" - 43 characters
 * udev event will only differ by prefix of what field is
 * updated, e.g. either:
 *     "NVMEFC_HOST_TRADDR=" or "NVMEFC_TRADDR=" - 19 max characters
 * 19 + 43 + null_fudge = 64 characters
 */
#define FCNVME_TRADDR_LENGTH		64
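/*
 * Example uevent payload built below (hypothetical WWNs):
 *	FC_EVENT=nvmediscovery
 *	NVMEFC_HOST_TRADDR=nn-0x20000090fa942779:pn-0x10000090fa942779
 *	NVMEFC_TRADDR=nn-0x20000090fa152833:pn-0x10000090fa152833
 */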
static void
nvme_fc_signal_discovery_scan(struct nvme_fc_lport *lport,
		struct nvme_fc_rport *rport)
{
	char hostaddr[FCNVME_TRADDR_LENGTH];	/* NVMEFC_HOST_TRADDR=...*/
	char tgtaddr[FCNVME_TRADDR_LENGTH];	/* NVMEFC_TRADDR=...*/
	char *envp[4] = { "FC_EVENT=nvmediscovery", hostaddr, tgtaddr, NULL };

	if (!(rport->remoteport.port_role & FC_PORT_ROLE_NVME_DISCOVERY))
		return;

	snprintf(hostaddr, sizeof(hostaddr),
		"NVMEFC_HOST_TRADDR=nn-0x%016llx:pn-0x%016llx",
		lport->localport.node_name, lport->localport.port_name);
	snprintf(tgtaddr, sizeof(tgtaddr),
		"NVMEFC_TRADDR=nn-0x%016llx:pn-0x%016llx",
		rport->remoteport.node_name, rport->remoteport.port_name);
	kobject_uevent_env(&fc_udev_device->kobj, KOBJ_CHANGE, envp);
}
static void
nvme_fc_free_rport(struct kref *ref)
{
	struct nvme_fc_rport *rport =
		container_of(ref, struct nvme_fc_rport, ref);
	struct nvme_fc_lport *lport =
		localport_to_lport(rport->remoteport.localport);
	unsigned long flags;

	WARN_ON(rport->remoteport.port_state != FC_OBJSTATE_DELETED);
	WARN_ON(!list_empty(&rport->ctrl_list));

	/* remove from lport list */
	spin_lock_irqsave(&nvme_fc_lock, flags);
	list_del(&rport->endp_list);
	spin_unlock_irqrestore(&nvme_fc_lock, flags);

	WARN_ON(!list_empty(&rport->disc_list));
	ida_simple_remove(&lport->endp_cnt, rport->remoteport.port_num);

	kfree(rport);

	nvme_fc_lport_put(lport);
}
static void
nvme_fc_rport_put(struct nvme_fc_rport *rport)
{
	kref_put(&rport->ref, nvme_fc_free_rport);
}

static int
nvme_fc_rport_get(struct nvme_fc_rport *rport)
{
	return kref_get_unless_zero(&rport->ref);
}
static void
nvme_fc_resume_controller(struct nvme_fc_ctrl *ctrl)
{
	switch (ctrl->ctrl.state) {
	case NVME_CTRL_NEW:
	case NVME_CTRL_CONNECTING:
		/*
		 * As all reconnects were suppressed, schedule a
		 * connect.
		 */
		dev_info(ctrl->ctrl.device,
			"NVME-FC{%d}: connectivity re-established. "
			"Attempting reconnect\n", ctrl->cnum);

		queue_delayed_work(nvme_wq, &ctrl->connect_work, 0);
		break;

	case NVME_CTRL_RESETTING:
		/*
		 * Controller is already in the process of terminating the
		 * association. No need to do anything further. The reconnect
		 * step will naturally occur after the reset completes.
		 */
		break;

	default:
		/* no action to take - let it delete */
		break;
	}
}
static struct nvme_fc_rport *
nvme_fc_attach_to_suspended_rport(struct nvme_fc_lport *lport,
				struct nvme_fc_port_info *pinfo)
{
	struct nvme_fc_rport *rport;
	struct nvme_fc_ctrl *ctrl;
	unsigned long flags;

	spin_lock_irqsave(&nvme_fc_lock, flags);

	list_for_each_entry(rport, &lport->endp_list, endp_list) {
		if (rport->remoteport.node_name != pinfo->node_name ||
		    rport->remoteport.port_name != pinfo->port_name)
			continue;

		if (!nvme_fc_rport_get(rport)) {
			rport = ERR_PTR(-ENOLCK);
			goto out_done;
		}

		spin_unlock_irqrestore(&nvme_fc_lock, flags);

		spin_lock_irqsave(&rport->lock, flags);

		/* has it been unregistered */
		if (rport->remoteport.port_state != FC_OBJSTATE_DELETED) {
			/* means lldd called us twice */
			spin_unlock_irqrestore(&rport->lock, flags);
			nvme_fc_rport_put(rport);
			return ERR_PTR(-ESTALE);
		}

		rport->remoteport.port_role = pinfo->port_role;
		rport->remoteport.port_id = pinfo->port_id;
		rport->remoteport.port_state = FC_OBJSTATE_ONLINE;
		rport->dev_loss_end = 0;

		/*
		 * kick off a reconnect attempt on all associations to the
		 * remote port. A successful reconnect will resume i/o.
		 */
		list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list)
			nvme_fc_resume_controller(ctrl);

		spin_unlock_irqrestore(&rport->lock, flags);

		return rport;
	}

	rport = NULL;

out_done:
	spin_unlock_irqrestore(&nvme_fc_lock, flags);

	return rport;
}
static inline void
__nvme_fc_set_dev_loss_tmo(struct nvme_fc_rport *rport,
			struct nvme_fc_port_info *pinfo)
{
	if (pinfo->dev_loss_tmo)
		rport->remoteport.dev_loss_tmo = pinfo->dev_loss_tmo;
	else
		rport->remoteport.dev_loss_tmo = NVME_FC_DEFAULT_DEV_LOSS_TMO;
}
/**
 * nvme_fc_register_remoteport - transport entry point called by an
 *                              LLDD to register the existence of a NVME
 *                              subsystem FC port on its fabric.
 * @localport: pointer to the (registered) local port that the remote
 *             subsystem port is connected to.
 * @pinfo:     pointer to information about the port to be registered
 * @portptr:   pointer to a remote port pointer. Upon success, the routine
 *             will allocate a nvme_fc_remote_port structure and place its
 *             address in the remote port pointer. Upon failure, remote port
 *             pointer will be set to 0.
 *
 * Returns:
 * a completion status. Must be 0 upon success; a negative errno
 * (ex: -ENXIO) upon failure.
 */
int
nvme_fc_register_remoteport(struct nvme_fc_local_port *localport,
				struct nvme_fc_port_info *pinfo,
				struct nvme_fc_remote_port **portptr)
{
	struct nvme_fc_lport *lport = localport_to_lport(localport);
	struct nvme_fc_rport *newrec;
	unsigned long flags;
	int ret, idx;

	if (!nvme_fc_lport_get(lport)) {
		ret = -ESHUTDOWN;
		goto out_reghost_failed;
	}

	/*
	 * look to see if there is already a remoteport that is waiting
	 * for a reconnect (within dev_loss_tmo) with the same WWN's.
	 * If so, transition to it and reconnect.
	 */
	newrec = nvme_fc_attach_to_suspended_rport(lport, pinfo);

	/* found an rport, but something about its state is bad */
	if (IS_ERR(newrec)) {
		ret = PTR_ERR(newrec);
		goto out_lport_put;

	/* found existing rport, which was resumed */
	} else if (newrec) {
		nvme_fc_lport_put(lport);
		__nvme_fc_set_dev_loss_tmo(newrec, pinfo);
		nvme_fc_signal_discovery_scan(lport, newrec);
		*portptr = &newrec->remoteport;
		return 0;
	}

	/* nothing found - allocate a new remoteport struct */

	newrec = kmalloc((sizeof(*newrec) + lport->ops->remote_priv_sz),
			 GFP_KERNEL);
	if (!newrec) {
		ret = -ENOMEM;
		goto out_lport_put;
	}

	idx = ida_simple_get(&lport->endp_cnt, 0, 0, GFP_KERNEL);
	if (idx < 0) {
		ret = -ENOSPC;
		goto out_kfree_rport;
	}

	INIT_LIST_HEAD(&newrec->endp_list);
	INIT_LIST_HEAD(&newrec->ctrl_list);
	INIT_LIST_HEAD(&newrec->ls_req_list);
	INIT_LIST_HEAD(&newrec->disc_list);
	kref_init(&newrec->ref);
	atomic_set(&newrec->act_ctrl_cnt, 0);
	spin_lock_init(&newrec->lock);
	newrec->remoteport.localport = &lport->localport;
	INIT_LIST_HEAD(&newrec->ls_rcv_list);
	newrec->dev = lport->dev;
	newrec->lport = lport;
	if (lport->ops->remote_priv_sz)
		newrec->remoteport.private = &newrec[1];
	else
		newrec->remoteport.private = NULL;
	newrec->remoteport.port_role = pinfo->port_role;
	newrec->remoteport.node_name = pinfo->node_name;
	newrec->remoteport.port_name = pinfo->port_name;
	newrec->remoteport.port_id = pinfo->port_id;
	newrec->remoteport.port_state = FC_OBJSTATE_ONLINE;
	newrec->remoteport.port_num = idx;
	__nvme_fc_set_dev_loss_tmo(newrec, pinfo);
	INIT_WORK(&newrec->lsrcv_work, nvme_fc_handle_ls_rqst_work);

	spin_lock_irqsave(&nvme_fc_lock, flags);
	list_add_tail(&newrec->endp_list, &lport->endp_list);
	spin_unlock_irqrestore(&nvme_fc_lock, flags);

	nvme_fc_signal_discovery_scan(lport, newrec);

	*portptr = &newrec->remoteport;
	return 0;

out_kfree_rport:
	kfree(newrec);
out_lport_put:
	nvme_fc_lport_put(lport);
out_reghost_failed:
	*portptr = NULL;
	return ret;
}
EXPORT_SYMBOL_GPL(nvme_fc_register_remoteport);
static void
nvme_fc_abort_lsops(struct nvme_fc_rport *rport)
{
	struct nvmefc_ls_req_op *lsop;
	unsigned long flags;

restart:
	spin_lock_irqsave(&rport->lock, flags);

	list_for_each_entry(lsop, &rport->ls_req_list, lsreq_list) {
		if (!(lsop->flags & FCOP_FLAGS_TERMIO)) {
			lsop->flags |= FCOP_FLAGS_TERMIO;
			spin_unlock_irqrestore(&rport->lock, flags);
			rport->lport->ops->ls_abort(&rport->lport->localport,
						&rport->remoteport,
						&lsop->ls_req);
			goto restart;
		}
	}
	spin_unlock_irqrestore(&rport->lock, flags);
}
static void
nvme_fc_ctrl_connectivity_loss(struct nvme_fc_ctrl *ctrl)
{
	dev_info(ctrl->ctrl.device,
		"NVME-FC{%d}: controller connectivity lost. Awaiting "
		"Reconnect", ctrl->cnum);

	switch (ctrl->ctrl.state) {
	case NVME_CTRL_NEW:
	case NVME_CTRL_LIVE:
		/*
		 * Schedule a controller reset. The reset will terminate the
		 * association and schedule the reconnect timer.  Reconnects
		 * will be attempted until either the ctlr_loss_tmo
		 * (max_retries * connect_delay) expires or the remoteport's
		 * dev_loss_tmo expires.
		 */
		if (nvme_reset_ctrl(&ctrl->ctrl)) {
			dev_warn(ctrl->ctrl.device,
				"NVME-FC{%d}: Couldn't schedule reset.\n",
				ctrl->cnum);
			nvme_delete_ctrl(&ctrl->ctrl);
		}
		break;

	case NVME_CTRL_CONNECTING:
		/*
		 * The association has already been terminated and the
		 * controller is attempting reconnects.  No need to do
		 * anything further.  Reconnects will be attempted until
		 * either the ctlr_loss_tmo (max_retries * connect_delay)
		 * expires or the remoteport's dev_loss_tmo expires.
		 */
		break;

	case NVME_CTRL_RESETTING:
		/*
		 * Controller is already in the process of terminating the
		 * association.  No need to do anything further. The reconnect
		 * step will kick in naturally after the association is
		 * terminated.
		 */
		break;

	case NVME_CTRL_DELETING:
	case NVME_CTRL_DELETING_NOIO:
	default:
		/* no action to take - let it delete */
		break;
	}
}
/**
 * nvme_fc_unregister_remoteport - transport entry point called by an
 *                              LLDD to deregister/remove a previously
 *                              registered NVME subsystem FC port.
 * @portptr: pointer to the (registered) remote port that is to be
 *           deregistered.
 *
 * Returns:
 * a completion status. Must be 0 upon success; a negative errno
 * (ex: -ENXIO) upon failure.
 */
int
nvme_fc_unregister_remoteport(struct nvme_fc_remote_port *portptr)
{
	struct nvme_fc_rport *rport = remoteport_to_rport(portptr);
	struct nvme_fc_ctrl *ctrl;
	unsigned long flags;

	if (!portptr)
		return -EINVAL;

	spin_lock_irqsave(&rport->lock, flags);

	if (portptr->port_state != FC_OBJSTATE_ONLINE) {
		spin_unlock_irqrestore(&rport->lock, flags);
		return -EINVAL;
	}
	portptr->port_state = FC_OBJSTATE_DELETED;

	rport->dev_loss_end = jiffies + (portptr->dev_loss_tmo * HZ);

	list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list) {
		/* if dev_loss_tmo==0, dev loss is immediate */
		if (!portptr->dev_loss_tmo) {
			dev_warn(ctrl->ctrl.device,
				"NVME-FC{%d}: controller connectivity lost.\n",
				ctrl->cnum);
			nvme_delete_ctrl(&ctrl->ctrl);
		} else
			nvme_fc_ctrl_connectivity_loss(ctrl);
	}

	spin_unlock_irqrestore(&rport->lock, flags);

	nvme_fc_abort_lsops(rport);

	if (atomic_read(&rport->act_ctrl_cnt) == 0)
		rport->lport->ops->remoteport_delete(portptr);

	/*
	 * release the reference, which will allow, if all controllers
	 * go away, which should only occur after dev_loss_tmo occurs,
	 * for the rport to be torn down.
	 */
	nvme_fc_rport_put(rport);

	return 0;
}
EXPORT_SYMBOL_GPL(nvme_fc_unregister_remoteport);
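/*
 * e.g. unregistering with dev_loss_tmo = 60 sets rport->dev_loss_end
 * to jiffies + 60 * HZ; controllers may keep reconnecting until that
 * deadline passes, after which they are deleted.
 */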
/**
 * nvme_fc_rescan_remoteport - transport entry point called by an
 *                             LLDD to request a nvme device rescan.
 * @remoteport: pointer to the (registered) remote port that is to be
 *              rescanned.
 *
 * Returns: N/A
 */
void
nvme_fc_rescan_remoteport(struct nvme_fc_remote_port *remoteport)
{
	struct nvme_fc_rport *rport = remoteport_to_rport(remoteport);

	nvme_fc_signal_discovery_scan(rport->lport, rport);
}
EXPORT_SYMBOL_GPL(nvme_fc_rescan_remoteport);
int
nvme_fc_set_remoteport_devloss(struct nvme_fc_remote_port *portptr,
			u32 dev_loss_tmo)
{
	struct nvme_fc_rport *rport = remoteport_to_rport(portptr);
	unsigned long flags;

	spin_lock_irqsave(&rport->lock, flags);

	if (portptr->port_state != FC_OBJSTATE_ONLINE) {
		spin_unlock_irqrestore(&rport->lock, flags);
		return -EINVAL;
	}

	/* a dev_loss_tmo of 0 (immediate) is allowed to be set */
	rport->remoteport.dev_loss_tmo = dev_loss_tmo;

	spin_unlock_irqrestore(&rport->lock, flags);

	return 0;
}
EXPORT_SYMBOL_GPL(nvme_fc_set_remoteport_devloss);
/* *********************** FC-NVME DMA Handling **************************** */

/*
 * The fcloop device passes in a NULL device pointer. Real LLD's will
 * pass in a valid device pointer. If NULL is passed to the dma mapping
 * routines, depending on the platform, it may or may not succeed, and
 * may crash.
 *
 * As such:
 * Wrap all the dma routines and check the dev pointer.
 *
 * For simple mappings (those that return just a dma address), we'll
 * noop them, returning a dma address of 0.
 *
 * On more complex mappings (dma_map_sg), a pseudo routine fills
 * in the scatter list, setting all dma addresses to 0.
 */
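/*
 * e.g. under fcloop (dev == NULL), fc_dma_map_single() below returns a
 * dma address of 0 and fc_dma_mapping_error() reports success, so the
 * pseudo mappings flow through the normal code paths untouched.
 */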
static inline dma_addr_t
fc_dma_map_single(struct device *dev, void *ptr, size_t size,
		enum dma_data_direction dir)
{
	return dev ? dma_map_single(dev, ptr, size, dir) : (dma_addr_t)0L;
}

static inline int
fc_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return dev ? dma_mapping_error(dev, dma_addr) : 0;
}

static inline void
fc_dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
	enum dma_data_direction dir)
{
	if (dev)
		dma_unmap_single(dev, addr, size, dir);
}

static inline void
fc_dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir)
{
	if (dev)
		dma_sync_single_for_cpu(dev, addr, size, dir);
}

static inline void
fc_dma_sync_single_for_device(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir)
{
	if (dev)
		dma_sync_single_for_device(dev, addr, size, dir);
}
/* pseudo dma_map_sg call */
static int
fc_map_sg(struct scatterlist *sg, int nents)
{
	struct scatterlist *s;
	int i;

	WARN_ON(nents == 0 || sg[0].length == 0);

	for_each_sg(sg, s, nents, i) {
		s->dma_address = 0L;
#ifdef CONFIG_NEED_SG_DMA_LENGTH
		s->dma_length = s->length;
#endif
	}
	return nents;
}
static inline int
fc_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir)
{
	return dev ? dma_map_sg(dev, sg, nents, dir) : fc_map_sg(sg, nents);
}

static inline void
fc_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir)
{
	if (dev)
		dma_unmap_sg(dev, sg, nents, dir);
}
/* *********************** FC-NVME LS Handling **************************** */

static void nvme_fc_ctrl_put(struct nvme_fc_ctrl *);
static int nvme_fc_ctrl_get(struct nvme_fc_ctrl *);

static void nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg);
static void
__nvme_fc_finish_ls_req(struct nvmefc_ls_req_op *lsop)
{
	struct nvme_fc_rport *rport = lsop->rport;
	struct nvmefc_ls_req *lsreq = &lsop->ls_req;
	unsigned long flags;

	spin_lock_irqsave(&rport->lock, flags);

	if (!lsop->req_queued) {
		spin_unlock_irqrestore(&rport->lock, flags);
		return;
	}

	list_del(&lsop->lsreq_list);

	lsop->req_queued = false;

	spin_unlock_irqrestore(&rport->lock, flags);

	fc_dma_unmap_single(rport->dev, lsreq->rqstdma,
				  (lsreq->rqstlen + lsreq->rsplen),
				  DMA_BIDIRECTIONAL);

	nvme_fc_rport_put(rport);
}
static int
__nvme_fc_send_ls_req(struct nvme_fc_rport *rport,
		struct nvmefc_ls_req_op *lsop,
		void (*done)(struct nvmefc_ls_req *req, int status))
{
	struct nvmefc_ls_req *lsreq = &lsop->ls_req;
	unsigned long flags;
	int ret = 0;

	if (rport->remoteport.port_state != FC_OBJSTATE_ONLINE)
		return -ECONNREFUSED;

	if (!nvme_fc_rport_get(rport))
		return -ESHUTDOWN;

	lsreq->done = done;
	lsop->rport = rport;
	lsop->req_queued = false;
	INIT_LIST_HEAD(&lsop->lsreq_list);
	init_completion(&lsop->ls_done);

	lsreq->rqstdma = fc_dma_map_single(rport->dev, lsreq->rqstaddr,
				  lsreq->rqstlen + lsreq->rsplen,
				  DMA_BIDIRECTIONAL);
	if (fc_dma_mapping_error(rport->dev, lsreq->rqstdma)) {
		ret = -EFAULT;
		goto out_putrport;
	}
	lsreq->rspdma = lsreq->rqstdma + lsreq->rqstlen;

	spin_lock_irqsave(&rport->lock, flags);

	list_add_tail(&lsop->lsreq_list, &rport->ls_req_list);

	lsop->req_queued = true;

	spin_unlock_irqrestore(&rport->lock, flags);

	ret = rport->lport->ops->ls_req(&rport->lport->localport,
					&rport->remoteport, lsreq);
	if (ret)
		goto out_unlink;

	return 0;

out_unlink:
	lsop->ls_error = ret;
	spin_lock_irqsave(&rport->lock, flags);
	lsop->req_queued = false;
	list_del(&lsop->lsreq_list);
	spin_unlock_irqrestore(&rport->lock, flags);
	fc_dma_unmap_single(rport->dev, lsreq->rqstdma,
				  (lsreq->rqstlen + lsreq->rsplen),
				  DMA_BIDIRECTIONAL);
out_putrport:
	nvme_fc_rport_put(rport);

	return ret;
}
static void
nvme_fc_send_ls_req_done(struct nvmefc_ls_req *lsreq, int status)
{
	struct nvmefc_ls_req_op *lsop = ls_req_to_lsop(lsreq);

	lsop->ls_error = status;
	complete(&lsop->ls_done);
}
static int
nvme_fc_send_ls_req(struct nvme_fc_rport *rport, struct nvmefc_ls_req_op *lsop)
{
	struct nvmefc_ls_req *lsreq = &lsop->ls_req;
	struct fcnvme_ls_rjt *rjt = lsreq->rspaddr;
	int ret;

	ret = __nvme_fc_send_ls_req(rport, lsop, nvme_fc_send_ls_req_done);
	if (ret)
		return ret;

	/*
	 * No timeout/not interruptible as we need the struct
	 * to exist until the lldd calls us back. Thus mandate
	 * wait until driver calls back. lldd responsible for
	 * the timeout action
	 */
	wait_for_completion(&lsop->ls_done);

	__nvme_fc_finish_ls_req(lsop);

	ret = lsop->ls_error;
	if (ret)
		return ret;

	/* ACC or RJT payload ? */
	if (rjt->w0.ls_cmd == FCNVME_LS_RJT)
		return -ENXIO;

	return 0;
}
static int
nvme_fc_send_ls_req_async(struct nvme_fc_rport *rport,
		struct nvmefc_ls_req_op *lsop,
		void (*done)(struct nvmefc_ls_req *req, int status))
{
	/* don't wait for completion */

	return __nvme_fc_send_ls_req(rport, lsop, done);
}
static int
nvme_fc_connect_admin_queue(struct nvme_fc_ctrl *ctrl,
	struct nvme_fc_queue *queue, u16 qsize, u16 ersp_ratio)
{
	struct nvmefc_ls_req_op *lsop;
	struct nvmefc_ls_req *lsreq;
	struct fcnvme_ls_cr_assoc_rqst *assoc_rqst;
	struct fcnvme_ls_cr_assoc_acc *assoc_acc;
	unsigned long flags;
	int ret, fcret = 0;

	lsop = kzalloc((sizeof(*lsop) +
			 sizeof(*assoc_rqst) + sizeof(*assoc_acc) +
			 ctrl->lport->ops->lsrqst_priv_sz), GFP_KERNEL);
	if (!lsop) {
		dev_info(ctrl->ctrl.device,
			"NVME-FC{%d}: send Create Association failed: ENOMEM\n",
			ctrl->cnum);
		ret = -ENOMEM;
		goto out_no_memory;
	}

	assoc_rqst = (struct fcnvme_ls_cr_assoc_rqst *)&lsop[1];
	assoc_acc = (struct fcnvme_ls_cr_assoc_acc *)&assoc_rqst[1];
	lsreq = &lsop->ls_req;
	if (ctrl->lport->ops->lsrqst_priv_sz)
		lsreq->private = &assoc_acc[1];
	else
		lsreq->private = NULL;

	assoc_rqst->w0.ls_cmd = FCNVME_LS_CREATE_ASSOCIATION;
	assoc_rqst->desc_list_len =
			cpu_to_be32(sizeof(struct fcnvme_lsdesc_cr_assoc_cmd));

	assoc_rqst->assoc_cmd.desc_tag =
			cpu_to_be32(FCNVME_LSDESC_CREATE_ASSOC_CMD);
	assoc_rqst->assoc_cmd.desc_len =
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_cr_assoc_cmd));

	assoc_rqst->assoc_cmd.ersp_ratio = cpu_to_be16(ersp_ratio);
	assoc_rqst->assoc_cmd.sqsize = cpu_to_be16(qsize - 1);
	/* Linux supports only Dynamic controllers */
	assoc_rqst->assoc_cmd.cntlid = cpu_to_be16(0xffff);
	uuid_copy(&assoc_rqst->assoc_cmd.hostid, &ctrl->ctrl.opts->host->id);
	strncpy(assoc_rqst->assoc_cmd.hostnqn, ctrl->ctrl.opts->host->nqn,
		min(FCNVME_ASSOC_HOSTNQN_LEN, NVMF_NQN_SIZE));
	strncpy(assoc_rqst->assoc_cmd.subnqn, ctrl->ctrl.opts->subsysnqn,
		min(FCNVME_ASSOC_SUBNQN_LEN, NVMF_NQN_SIZE));

	lsop->queue = queue;
	lsreq->rqstaddr = assoc_rqst;
	lsreq->rqstlen = sizeof(*assoc_rqst);
	lsreq->rspaddr = assoc_acc;
	lsreq->rsplen = sizeof(*assoc_acc);
	lsreq->timeout = NVME_FC_LS_TIMEOUT_SEC;

	ret = nvme_fc_send_ls_req(ctrl->rport, lsop);
	if (ret)
		goto out_free_buffer;

	/* process connect LS completion */

	/* validate the ACC response */
	if (assoc_acc->hdr.w0.ls_cmd != FCNVME_LS_ACC)
		fcret = VERR_LSACC;
	else if (assoc_acc->hdr.desc_list_len !=
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_ls_cr_assoc_acc)))
		fcret = VERR_CR_ASSOC_ACC_LEN;
	else if (assoc_acc->hdr.rqst.desc_tag !=
			cpu_to_be32(FCNVME_LSDESC_RQST))
		fcret = VERR_LSDESC_RQST;
	else if (assoc_acc->hdr.rqst.desc_len !=
			fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rqst)))
		fcret = VERR_LSDESC_RQST_LEN;
	else if (assoc_acc->hdr.rqst.w0.ls_cmd != FCNVME_LS_CREATE_ASSOCIATION)
		fcret = VERR_CR_ASSOC;
	else if (assoc_acc->associd.desc_tag !=
			cpu_to_be32(FCNVME_LSDESC_ASSOC_ID))
		fcret = VERR_ASSOC_ID;
	else if (assoc_acc->associd.desc_len !=
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_assoc_id)))
		fcret = VERR_ASSOC_ID_LEN;
	else if (assoc_acc->connectid.desc_tag !=
			cpu_to_be32(FCNVME_LSDESC_CONN_ID))
		fcret = VERR_CONN_ID;
	else if (assoc_acc->connectid.desc_len !=
			fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_conn_id)))
		fcret = VERR_CONN_ID_LEN;

	if (fcret) {
		ret = -EBADF;
		dev_err(ctrl->dev,
			"q %d Create Association LS failed: %s\n",
			queue->qnum, validation_errors[fcret]);
	} else {
		spin_lock_irqsave(&ctrl->lock, flags);
		ctrl->association_id =
			be64_to_cpu(assoc_acc->associd.association_id);
		queue->connection_id =
			be64_to_cpu(assoc_acc->connectid.connection_id);
		set_bit(NVME_FC_Q_CONNECTED, &queue->flags);
		spin_unlock_irqrestore(&ctrl->lock, flags);
	}

out_free_buffer:
	kfree(lsop);
out_no_memory:
	if (ret)
		dev_err(ctrl->dev,
			"queue %d connect admin queue failed (%d).\n",
			queue->qnum, ret);
	return ret;
}
static int
nvme_fc_connect_queue(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
			u16 qsize, u16 ersp_ratio)
{
	struct nvmefc_ls_req_op *lsop;
	struct nvmefc_ls_req *lsreq;
	struct fcnvme_ls_cr_conn_rqst *conn_rqst;
	struct fcnvme_ls_cr_conn_acc *conn_acc;
	int ret, fcret = 0;

	lsop = kzalloc((sizeof(*lsop) +
			 sizeof(*conn_rqst) + sizeof(*conn_acc) +
			 ctrl->lport->ops->lsrqst_priv_sz), GFP_KERNEL);
	if (!lsop) {
		dev_info(ctrl->ctrl.device,
			"NVME-FC{%d}: send Create Connection failed: ENOMEM\n",
			ctrl->cnum);
		ret = -ENOMEM;
		goto out_no_memory;
	}

	conn_rqst = (struct fcnvme_ls_cr_conn_rqst *)&lsop[1];
	conn_acc = (struct fcnvme_ls_cr_conn_acc *)&conn_rqst[1];
	lsreq = &lsop->ls_req;
	if (ctrl->lport->ops->lsrqst_priv_sz)
		lsreq->private = (void *)&conn_acc[1];
	else
		lsreq->private = NULL;

	conn_rqst->w0.ls_cmd = FCNVME_LS_CREATE_CONNECTION;
	conn_rqst->desc_list_len = cpu_to_be32(
				sizeof(struct fcnvme_lsdesc_assoc_id) +
				sizeof(struct fcnvme_lsdesc_cr_conn_cmd));

	conn_rqst->associd.desc_tag = cpu_to_be32(FCNVME_LSDESC_ASSOC_ID);
	conn_rqst->associd.desc_len =
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_assoc_id));
	conn_rqst->associd.association_id = cpu_to_be64(ctrl->association_id);
	conn_rqst->connect_cmd.desc_tag =
			cpu_to_be32(FCNVME_LSDESC_CREATE_CONN_CMD);
	conn_rqst->connect_cmd.desc_len =
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_cr_conn_cmd));
	conn_rqst->connect_cmd.ersp_ratio = cpu_to_be16(ersp_ratio);
	conn_rqst->connect_cmd.qid = cpu_to_be16(queue->qnum);
	conn_rqst->connect_cmd.sqsize = cpu_to_be16(qsize - 1);

	lsop->queue = queue;
	lsreq->rqstaddr = conn_rqst;
	lsreq->rqstlen = sizeof(*conn_rqst);
	lsreq->rspaddr = conn_acc;
	lsreq->rsplen = sizeof(*conn_acc);
	lsreq->timeout = NVME_FC_LS_TIMEOUT_SEC;

	ret = nvme_fc_send_ls_req(ctrl->rport, lsop);
	if (ret)
		goto out_free_buffer;

	/* process connect LS completion */

	/* validate the ACC response */
	if (conn_acc->hdr.w0.ls_cmd != FCNVME_LS_ACC)
		fcret = VERR_LSACC;
	else if (conn_acc->hdr.desc_list_len !=
			fcnvme_lsdesc_len(sizeof(struct fcnvme_ls_cr_conn_acc)))
		fcret = VERR_CR_CONN_ACC_LEN;
	else if (conn_acc->hdr.rqst.desc_tag != cpu_to_be32(FCNVME_LSDESC_RQST))
		fcret = VERR_LSDESC_RQST;
	else if (conn_acc->hdr.rqst.desc_len !=
			fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rqst)))
		fcret = VERR_LSDESC_RQST_LEN;
	else if (conn_acc->hdr.rqst.w0.ls_cmd != FCNVME_LS_CREATE_CONNECTION)
		fcret = VERR_CR_CONN;
	else if (conn_acc->connectid.desc_tag !=
			cpu_to_be32(FCNVME_LSDESC_CONN_ID))
		fcret = VERR_CONN_ID;
	else if (conn_acc->connectid.desc_len !=
			fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_conn_id)))
		fcret = VERR_CONN_ID_LEN;

	if (fcret) {
		ret = -EBADF;
		dev_err(ctrl->dev,
			"q %d Create I/O Connection LS failed: %s\n",
			queue->qnum, validation_errors[fcret]);
	} else {
		queue->connection_id =
			be64_to_cpu(conn_acc->connectid.connection_id);
		set_bit(NVME_FC_Q_CONNECTED, &queue->flags);
	}

out_free_buffer:
	kfree(lsop);
out_no_memory:
	if (ret)
		dev_err(ctrl->dev,
			"queue %d connect I/O queue failed (%d).\n",
			queue->qnum, ret);
	return ret;
}
static void
nvme_fc_disconnect_assoc_done(struct nvmefc_ls_req *lsreq, int status)
{
	struct nvmefc_ls_req_op *lsop = ls_req_to_lsop(lsreq);

	__nvme_fc_finish_ls_req(lsop);

	/* fc-nvme initiator doesn't care about success or failure of cmd */

	kfree(lsop);
}
/*
 * This routine sends a FC-NVME LS to disconnect (aka terminate)
 * the FC-NVME Association.  Terminating the association also
 * terminates the FC-NVME connections (per queue, both admin and io
 * queues) that are part of the association. E.g. things are torn
 * down, and the related FC-NVME Association ID and Connection IDs
 * become invalid.
 *
 * The behavior of the fc-nvme initiator is such that its
 * understanding of the association and connections will implicitly
 * be torn down. The action is implicit as it may be due to a loss of
 * connectivity with the fc-nvme target, so you may never get a
 * response even if you tried.  As such, the action of this routine
 * is to asynchronously send the LS, ignore any results of the LS, and
 * continue on with terminating the association. If the fc-nvme target
 * is present and receives the LS, it too can tear down.
 */
static void
nvme_fc_xmt_disconnect_assoc(struct nvme_fc_ctrl *ctrl)
{
	struct fcnvme_ls_disconnect_assoc_rqst *discon_rqst;
	struct fcnvme_ls_disconnect_assoc_acc *discon_acc;
	struct nvmefc_ls_req_op *lsop;
	struct nvmefc_ls_req *lsreq;
	int ret;

	lsop = kzalloc((sizeof(*lsop) +
			sizeof(*discon_rqst) + sizeof(*discon_acc) +
			ctrl->lport->ops->lsrqst_priv_sz), GFP_KERNEL);
	if (!lsop) {
		dev_info(ctrl->ctrl.device,
			"NVME-FC{%d}: send Disconnect Association "
			"failed: ENOMEM\n",
			ctrl->cnum);
		return;
	}

	discon_rqst = (struct fcnvme_ls_disconnect_assoc_rqst *)&lsop[1];
	discon_acc = (struct fcnvme_ls_disconnect_assoc_acc *)&discon_rqst[1];
	lsreq = &lsop->ls_req;
	if (ctrl->lport->ops->lsrqst_priv_sz)
		lsreq->private = (void *)&discon_acc[1];
	else
		lsreq->private = NULL;

	nvmefc_fmt_lsreq_discon_assoc(lsreq, discon_rqst, discon_acc,
				ctrl->association_id);

	ret = nvme_fc_send_ls_req_async(ctrl->rport, lsop,
				nvme_fc_disconnect_assoc_done);
	if (ret)
		kfree(lsop);
}
static void
nvme_fc_xmt_ls_rsp_done(struct nvmefc_ls_rsp *lsrsp)
{
	struct nvmefc_ls_rcv_op *lsop = lsrsp->nvme_fc_private;
	struct nvme_fc_rport *rport = lsop->rport;
	struct nvme_fc_lport *lport = rport->lport;
	unsigned long flags;

	spin_lock_irqsave(&rport->lock, flags);
	list_del(&lsop->lsrcv_list);
	spin_unlock_irqrestore(&rport->lock, flags);

	fc_dma_sync_single_for_cpu(lport->dev, lsop->rspdma,
				sizeof(*lsop->rspbuf), DMA_TO_DEVICE);
	fc_dma_unmap_single(lport->dev, lsop->rspdma,
			sizeof(*lsop->rspbuf), DMA_TO_DEVICE);

	kfree(lsop);

	nvme_fc_rport_put(rport);
}
static void
nvme_fc_xmt_ls_rsp(struct nvmefc_ls_rcv_op *lsop)
{
	struct nvme_fc_rport *rport = lsop->rport;
	struct nvme_fc_lport *lport = rport->lport;
	struct fcnvme_ls_rqst_w0 *w0 = &lsop->rqstbuf->w0;
	int ret;

	fc_dma_sync_single_for_device(lport->dev, lsop->rspdma,
				  sizeof(*lsop->rspbuf), DMA_TO_DEVICE);

	ret = lport->ops->xmt_ls_rsp(&lport->localport, &rport->remoteport,
				     lsop->lsrsp);
	if (ret) {
		dev_warn(lport->dev,
			"LLDD rejected LS RSP xmt: LS %d status %d\n",
			w0->ls_cmd, ret);
		nvme_fc_xmt_ls_rsp_done(lsop->lsrsp);
		return;
	}
}
static struct nvme_fc_ctrl *
nvme_fc_match_disconn_ls(struct nvme_fc_rport *rport,
		      struct nvmefc_ls_rcv_op *lsop)
{
	struct fcnvme_ls_disconnect_assoc_rqst *rqst =
					&lsop->rqstbuf->rq_dis_assoc;
	struct nvme_fc_ctrl *ctrl, *ret = NULL;
	struct nvmefc_ls_rcv_op *oldls = NULL;
	u64 association_id = be64_to_cpu(rqst->associd.association_id);
	unsigned long flags;

	spin_lock_irqsave(&rport->lock, flags);

	list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list) {
		if (!nvme_fc_ctrl_get(ctrl))
			continue;
		spin_lock(&ctrl->lock);
		if (association_id == ctrl->association_id) {
			oldls = ctrl->rcv_disconn;
			ctrl->rcv_disconn = lsop;
			ret = ctrl;
		}
		spin_unlock(&ctrl->lock);
		if (ret)
			/* leave the ctrl get reference */
			break;
		nvme_fc_ctrl_put(ctrl);
	}

	spin_unlock_irqrestore(&rport->lock, flags);

	/* transmit a response for anything that was pending */
	if (oldls) {
		dev_info(rport->lport->dev,
			"NVME-FC{%d}: Multiple Disconnect Association "
			"LS's received\n", ctrl->cnum);
		/* overwrite good response with bogus failure */
		oldls->lsrsp->rsplen = nvme_fc_format_rjt(oldls->rspbuf,
						sizeof(*oldls->rspbuf),
						rqst->w0.ls_cmd,
						FCNVME_RJT_RC_UNAB,
						FCNVME_RJT_EXP_NONE, 0);
		nvme_fc_xmt_ls_rsp(oldls);
	}

	return ret;
}
/*
 * returns true to mean LS handled and ls_rsp can be sent
 * returns false to defer ls_rsp xmt (will be done as part of
 * association termination)
 */
static bool
nvme_fc_ls_disconnect_assoc(struct nvmefc_ls_rcv_op *lsop)
{
	struct nvme_fc_rport *rport = lsop->rport;
	struct fcnvme_ls_disconnect_assoc_rqst *rqst =
					&lsop->rqstbuf->rq_dis_assoc;
	struct fcnvme_ls_disconnect_assoc_acc *acc =
					&lsop->rspbuf->rsp_dis_assoc;
	struct nvme_fc_ctrl *ctrl = NULL;
	int ret = 0;

	memset(acc, 0, sizeof(*acc));

	ret = nvmefc_vldt_lsreq_discon_assoc(lsop->rqstdatalen, rqst);
	if (!ret) {
		/* match an active association */
		ctrl = nvme_fc_match_disconn_ls(rport, lsop);
		if (!ctrl)
			ret = VERR_NO_ASSOC;
	}

	if (ret) {
		dev_info(rport->lport->dev,
			"Disconnect LS failed: %s\n",
			validation_errors[ret]);
		lsop->lsrsp->rsplen = nvme_fc_format_rjt(acc,
					sizeof(*acc), rqst->w0.ls_cmd,
					(ret == VERR_NO_ASSOC) ?
						FCNVME_RJT_RC_INV_ASSOC :
						FCNVME_RJT_RC_LOGIC,
					FCNVME_RJT_EXP_NONE, 0);
		return true;
	}

	/* format an ACCept response */

	lsop->lsrsp->rsplen = sizeof(*acc);

	nvme_fc_format_rsp_hdr(acc, FCNVME_LS_ACC,
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_ls_disconnect_assoc_acc)),
			FCNVME_LS_DISCONNECT_ASSOC);

	/*
	 * the transmit of the response will occur after the exchanges
	 * for the association have been ABTS'd by
	 * nvme_fc_delete_association().
	 */

	/* fail the association */
	nvme_fc_error_recovery(ctrl, "Disconnect Association LS received");

	/* release the reference taken by nvme_fc_match_disconn_ls() */
	nvme_fc_ctrl_put(ctrl);

	return false;
}
/*
 * Actual Processing routine for received FC-NVME LS Requests from the LLD
 * returns true if a response should be sent afterward, false if rsp will
 * be sent asynchronously.
 */
static bool
nvme_fc_handle_ls_rqst(struct nvmefc_ls_rcv_op *lsop)
{
	struct fcnvme_ls_rqst_w0 *w0 = &lsop->rqstbuf->w0;
	bool ret = true;

	lsop->lsrsp->nvme_fc_private = lsop;
	lsop->lsrsp->rspbuf = lsop->rspbuf;
	lsop->lsrsp->rspdma = lsop->rspdma;
	lsop->lsrsp->done = nvme_fc_xmt_ls_rsp_done;
	/* Be preventative. handlers will later set to valid length */
	lsop->lsrsp->rsplen = 0;

	/*
	 * handlers:
	 *   parse request input, execute the request, and format the
	 *   LS response
	 */
	switch (w0->ls_cmd) {
	case FCNVME_LS_DISCONNECT_ASSOC:
		ret = nvme_fc_ls_disconnect_assoc(lsop);
		break;
	case FCNVME_LS_DISCONNECT_CONN:
		lsop->lsrsp->rsplen = nvme_fc_format_rjt(lsop->rspbuf,
				sizeof(*lsop->rspbuf), w0->ls_cmd,
				FCNVME_RJT_RC_UNSUP, FCNVME_RJT_EXP_NONE, 0);
		break;
	case FCNVME_LS_CREATE_ASSOCIATION:
	case FCNVME_LS_CREATE_CONNECTION:
		lsop->lsrsp->rsplen = nvme_fc_format_rjt(lsop->rspbuf,
				sizeof(*lsop->rspbuf), w0->ls_cmd,
				FCNVME_RJT_RC_LOGIC, FCNVME_RJT_EXP_NONE, 0);
		break;
	default:
		lsop->lsrsp->rsplen = nvme_fc_format_rjt(lsop->rspbuf,
				sizeof(*lsop->rspbuf), w0->ls_cmd,
				FCNVME_RJT_RC_INVAL, FCNVME_RJT_EXP_NONE, 0);
		break;
	}

	return(ret);
}
static void
nvme_fc_handle_ls_rqst_work(struct work_struct *work)
{
	struct nvme_fc_rport *rport =
			container_of(work, struct nvme_fc_rport, lsrcv_work);
	struct fcnvme_ls_rqst_w0 *w0;
	struct nvmefc_ls_rcv_op *lsop;
	unsigned long flags;
	bool sendrsp;

restart:
	sendrsp = true;
	spin_lock_irqsave(&rport->lock, flags);
	list_for_each_entry(lsop, &rport->ls_rcv_list, lsrcv_list) {
		if (lsop->handled)
			continue;

		lsop->handled = true;
		if (rport->remoteport.port_state == FC_OBJSTATE_ONLINE) {
			spin_unlock_irqrestore(&rport->lock, flags);
			sendrsp = nvme_fc_handle_ls_rqst(lsop);
		} else {
			spin_unlock_irqrestore(&rport->lock, flags);
			w0 = &lsop->rqstbuf->w0;
			lsop->lsrsp->rsplen = nvme_fc_format_rjt(
						lsop->rspbuf,
						sizeof(*lsop->rspbuf),
						w0->ls_cmd,
						FCNVME_RJT_RC_UNAB,
						FCNVME_RJT_EXP_NONE, 0);
		}
		if (sendrsp)
			nvme_fc_xmt_ls_rsp(lsop);
		goto restart;
	}
	spin_unlock_irqrestore(&rport->lock, flags);
}
/**
 * nvme_fc_rcv_ls_req - transport entry point called by an LLDD
 *                       upon the reception of a NVME LS request.
 *
 * The nvme-fc layer will copy payload to an internal structure for
 * processing.  As such, upon completion of the routine, the LLDD may
 * immediately free/reuse the LS request buffer passed in the call.
 *
 * If this routine returns error, the LLDD should abort the exchange.
 *
 * @portptr:    pointer to the (registered) remote port that the LS
 *              was received from. The remoteport is associated with
 *              a specific localport.
 * @lsrsp:      pointer to a nvmefc_ls_rsp response structure to be
 *              used to reference the exchange corresponding to the LS
 *              when issuing an ls response.
 * @lsreqbuf:   pointer to the buffer containing the LS Request
 * @lsreqbuf_len: length, in bytes, of the received LS request
 */
int
nvme_fc_rcv_ls_req(struct nvme_fc_remote_port *portptr,
			struct nvmefc_ls_rsp *lsrsp,
			void *lsreqbuf, u32 lsreqbuf_len)
{
	struct nvme_fc_rport *rport = remoteport_to_rport(portptr);
	struct nvme_fc_lport *lport = rport->lport;
	struct fcnvme_ls_rqst_w0 *w0 = (struct fcnvme_ls_rqst_w0 *)lsreqbuf;
	struct nvmefc_ls_rcv_op *lsop;
	unsigned long flags;
	int ret;

	nvme_fc_rport_get(rport);

	/* validate there's a routine to transmit a response */
	if (!lport->ops->xmt_ls_rsp) {
		dev_info(lport->dev,
			"RCV %s LS failed: no LLDD xmt_ls_rsp\n",
			(w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ?
				nvmefc_ls_names[w0->ls_cmd] : "");
		ret = -EINVAL;
		goto out_put;
	}

	if (lsreqbuf_len > sizeof(union nvmefc_ls_requests)) {
		dev_info(lport->dev,
			"RCV %s LS failed: payload too large\n",
			(w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ?
				nvmefc_ls_names[w0->ls_cmd] : "");
		ret = -E2BIG;
		goto out_put;
	}

	lsop = kzalloc(sizeof(*lsop) +
			sizeof(union nvmefc_ls_requests) +
			sizeof(union nvmefc_ls_responses),
			GFP_KERNEL);
	if (!lsop) {
		dev_info(lport->dev,
			"RCV %s LS failed: No memory\n",
			(w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ?
				nvmefc_ls_names[w0->ls_cmd] : "");
		ret = -ENOMEM;
		goto out_put;
	}
	lsop->rqstbuf = (union nvmefc_ls_requests *)&lsop[1];
	lsop->rspbuf = (union nvmefc_ls_responses *)&lsop->rqstbuf[1];

	lsop->rspdma = fc_dma_map_single(lport->dev, lsop->rspbuf,
					sizeof(*lsop->rspbuf),
					DMA_TO_DEVICE);
	if (fc_dma_mapping_error(lport->dev, lsop->rspdma)) {
		dev_info(lport->dev,
			"RCV %s LS failed: DMA mapping failure\n",
			(w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ?
				nvmefc_ls_names[w0->ls_cmd] : "");
		ret = -EFAULT;
		goto out_free;
	}

	lsop->rport = rport;
	lsop->lsrsp = lsrsp;

	memcpy(lsop->rqstbuf, lsreqbuf, lsreqbuf_len);
	lsop->rqstdatalen = lsreqbuf_len;

	spin_lock_irqsave(&rport->lock, flags);
	if (rport->remoteport.port_state != FC_OBJSTATE_ONLINE) {
		spin_unlock_irqrestore(&rport->lock, flags);
		ret = -ENOTCONN;
		goto out_unmap;
	}
	list_add_tail(&lsop->lsrcv_list, &rport->ls_rcv_list);
	spin_unlock_irqrestore(&rport->lock, flags);

	schedule_work(&rport->lsrcv_work);

	return 0;

out_unmap:
	fc_dma_unmap_single(lport->dev, lsop->rspdma,
			sizeof(*lsop->rspbuf), DMA_TO_DEVICE);
out_free:
	kfree(lsop);
out_put:
	nvme_fc_rport_put(rport);
	return ret;
}
EXPORT_SYMBOL_GPL(nvme_fc_rcv_ls_req);
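/*
 * Sketch of the LLDD side of this entry point (hypothetical names;
 * per the contract above, lsreqbuf may be reused once this returns):
 *
 *	if (nvme_fc_rcv_ls_req(remoteport, &exch->lsrsp,
 *			       exch->buf, exch->len))
 *		lldd_abort_exchange(exch);
 */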
/* *********************** NVME Ctrl Routines **************************** */

static void
__nvme_fc_exit_request(struct nvme_fc_ctrl *ctrl,
		struct nvme_fc_fcp_op *op)
{
	fc_dma_unmap_single(ctrl->lport->dev, op->fcp_req.rspdma,
				sizeof(op->rsp_iu), DMA_FROM_DEVICE);
	fc_dma_unmap_single(ctrl->lport->dev, op->fcp_req.cmddma,
				sizeof(op->cmd_iu), DMA_TO_DEVICE);

	atomic_set(&op->state, FCPOP_STATE_UNINIT);
}
static void
nvme_fc_exit_request(struct blk_mq_tag_set *set, struct request *rq,
		unsigned int hctx_idx)
{
	struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);

	return __nvme_fc_exit_request(set->driver_data, op);
}
static int
__nvme_fc_abort_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_fcp_op *op)
{
	unsigned long flags;
	int opstate;

	spin_lock_irqsave(&ctrl->lock, flags);
	opstate = atomic_xchg(&op->state, FCPOP_STATE_ABORTED);
	if (opstate != FCPOP_STATE_ACTIVE)
		atomic_set(&op->state, opstate);
	else if (test_bit(FCCTRL_TERMIO, &ctrl->flags)) {
		op->flags |= FCOP_FLAGS_TERMIO;
		ctrl->iocnt++;
	}
	spin_unlock_irqrestore(&ctrl->lock, flags);

	if (opstate != FCPOP_STATE_ACTIVE)
		return -ECANCELED;

	ctrl->lport->ops->fcp_abort(&ctrl->lport->localport,
					&ctrl->rport->remoteport,
					op->queue->lldd_handle,
					&op->fcp_req);

	return 0;
}
static void
nvme_fc_abort_aen_ops(struct nvme_fc_ctrl *ctrl)
{
	struct nvme_fc_fcp_op *aen_op = ctrl->aen_ops;
	int i;

	/* ensure we've initialized the ops once */
	if (!(aen_op->flags & FCOP_FLAGS_AEN))
		return;

	for (i = 0; i < NVME_NR_AEN_COMMANDS; i++, aen_op++)
		__nvme_fc_abort_op(ctrl, aen_op);
}
static inline void
__nvme_fc_fcpop_chk_teardowns(struct nvme_fc_ctrl *ctrl,
		struct nvme_fc_fcp_op *op, int opstate)
{
	unsigned long flags;

	if (opstate == FCPOP_STATE_ABORTED) {
		spin_lock_irqsave(&ctrl->lock, flags);
		if (test_bit(FCCTRL_TERMIO, &ctrl->flags) &&
		    op->flags & FCOP_FLAGS_TERMIO) {
			if (!--ctrl->iocnt)
				wake_up(&ctrl->ioabort_wait);
		}
		spin_unlock_irqrestore(&ctrl->lock, flags);
	}
}
static void
nvme_fc_ctrl_ioerr_work(struct work_struct *work)
{
	struct nvme_fc_ctrl *ctrl =
			container_of(work, struct nvme_fc_ctrl, ioerr_work);

	nvme_fc_error_recovery(ctrl, "transport detected io error");
}
static void
nvme_fc_fcpio_done(struct nvmefc_fcp_req *req)
{
	struct nvme_fc_fcp_op *op = fcp_req_to_fcp_op(req);
	struct request *rq = op->rq;
	struct nvmefc_fcp_req *freq = &op->fcp_req;
	struct nvme_fc_ctrl *ctrl = op->ctrl;
	struct nvme_fc_queue *queue = op->queue;
	struct nvme_completion *cqe = &op->rsp_iu.cqe;
	struct nvme_command *sqe = &op->cmd_iu.sqe;
	__le16 status = cpu_to_le16(NVME_SC_SUCCESS << 1);
	union nvme_result result;
	bool terminate_assoc = true;
	int opstate;

	/*
	 * WARNING:
	 * The current linux implementation of a nvme controller
	 * allocates a single tag set for all io queues and sizes
	 * the io queues to fully hold all possible tags. Thus, the
	 * implementation does not reference or care about the sqhd
	 * value as it never needs to use the sqhd/sqtail pointers
	 * for submission pacing.
	 *
	 * This affects the FC-NVME implementation in two ways:
	 * 1) As the value doesn't matter, we don't need to waste
	 *    cycles extracting it from ERSPs and stamping it in the
	 *    cases where the transport fabricates CQEs on successful
	 *    completions.
	 * 2) The FC-NVME implementation requires that delivery of
	 *    ERSP completions are to go back to the nvme layer in order
	 *    relative to the rsn, such that the sqhd value will always
	 *    be "in order" for the nvme layer. As the nvme layer in
	 *    linux doesn't care about sqhd, there's no need to return
	 *    them in order.
	 *
	 * Additionally:
	 * As the core nvme layer in linux currently does not look at
	 * every field in the cqe - in cases where the FC transport must
	 * fabricate a CQE, the following fields will not be set as they
	 * are not referenced:
	 *      cqe.sqid,  cqe.sqhd,  cqe.command_id
	 *
	 * Failure or error of an individual i/o, in a transport
	 * detected fashion unrelated to the nvme completion status,
	 * can potentially cause the initiator and target sides to get
	 * out of sync on SQ head/tail (aka outstanding io count allowed).
	 * Per FC-NVME spec, failure of an individual command requires
	 * the connection to be terminated, which in turn requires the
	 * association to be terminated.
	 */

	opstate = atomic_xchg(&op->state, FCPOP_STATE_COMPLETE);

	fc_dma_sync_single_for_cpu(ctrl->lport->dev, op->fcp_req.rspdma,
				sizeof(op->rsp_iu), DMA_FROM_DEVICE);

	if (opstate == FCPOP_STATE_ABORTED)
		status = cpu_to_le16(NVME_SC_HOST_ABORTED_CMD << 1);
	else if (freq->status) {
		status = cpu_to_le16(NVME_SC_HOST_PATH_ERROR << 1);
		dev_info(ctrl->ctrl.device,
			"NVME-FC{%d}: io failed due to lldd error %d\n",
			ctrl->cnum, freq->status);
	}

	/*
	 * For the linux implementation, if we have an unsuccessful
	 * status, the blk-mq layer can typically be called with the
	 * non-zero status and the content of the cqe isn't important.
	 */
	if (status)
		goto done;

	/*
	 * command completed successfully relative to the wire
	 * protocol. However, validate anything received and
	 * extract the status and result from the cqe (create it
	 * where necessary).
	 */

	switch (freq->rcv_rsplen) {

	case 0:
	case NVME_FC_SIZEOF_ZEROS_RSP:
		/*
		 * No response payload or 12 bytes of payload (which
		 * should all be zeros) are considered successful and
		 * no payload in the CQE by the transport.
		 */
		if (freq->transferred_length !=
		    be32_to_cpu(op->cmd_iu.data_len)) {
			status = cpu_to_le16(NVME_SC_HOST_PATH_ERROR << 1);
			dev_info(ctrl->ctrl.device,
				"NVME-FC{%d}: io failed due to bad transfer "
				"length: %d vs expected %d\n",
				ctrl->cnum, freq->transferred_length,
				be32_to_cpu(op->cmd_iu.data_len));
			goto done;
		}
		result.u64 = 0;
		break;

	case sizeof(struct nvme_fc_ersp_iu):
		/*
		 * The ERSP IU contains a full completion with CQE.
		 * Validate ERSP IU and look at cqe.
		 */
		if (unlikely(be16_to_cpu(op->rsp_iu.iu_len) !=
					(freq->rcv_rsplen / 4) ||
			     be32_to_cpu(op->rsp_iu.xfrd_len) !=
					freq->transferred_length ||
			     op->rsp_iu.ersp_result ||
			     sqe->common.command_id != cqe->command_id)) {
			status = cpu_to_le16(NVME_SC_HOST_PATH_ERROR << 1);
			dev_info(ctrl->ctrl.device,
				"NVME-FC{%d}: io failed due to bad NVMe_ERSP: "
				"iu len %d, xfr len %d vs %d, status code "
				"%d, cmdid %d vs %d\n",
				ctrl->cnum, be16_to_cpu(op->rsp_iu.iu_len),
				be32_to_cpu(op->rsp_iu.xfrd_len),
				freq->transferred_length,
				op->rsp_iu.ersp_result,
				sqe->common.command_id,
				cqe->command_id);
			goto done;
		}
		result = cqe->result;
		status = cqe->status;
		break;

	default:
		status = cpu_to_le16(NVME_SC_HOST_PATH_ERROR << 1);
		dev_info(ctrl->ctrl.device,
			"NVME-FC{%d}: io failed due to odd NVMe_xRSP iu "
			"len %d\n",
			ctrl->cnum, freq->rcv_rsplen);
		goto done;
	}

	terminate_assoc = false;

done:
	if (op->flags & FCOP_FLAGS_AEN) {
		nvme_complete_async_event(&queue->ctrl->ctrl, status, &result);
		__nvme_fc_fcpop_chk_teardowns(ctrl, op, opstate);
		atomic_set(&op->state, FCPOP_STATE_IDLE);
		op->flags = FCOP_FLAGS_AEN;	/* clear other flags */
		nvme_fc_ctrl_put(ctrl);
		goto check_error;
	}

	__nvme_fc_fcpop_chk_teardowns(ctrl, op, opstate);
	if (!nvme_try_complete_req(rq, status, result))
		nvme_fc_complete_rq(rq);

check_error:
	if (terminate_assoc && ctrl->ctrl.state != NVME_CTRL_RESETTING)
		queue_work(nvme_reset_wq, &ctrl->ioerr_work);
}
static int
__nvme_fc_init_request(struct nvme_fc_ctrl *ctrl,
		struct nvme_fc_queue *queue, struct nvme_fc_fcp_op *op,
		struct request *rq, u32 rqno)
{
	struct nvme_fcp_op_w_sgl *op_w_sgl =
		container_of(op, typeof(*op_w_sgl), op);
	struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu;
	int ret = 0;

	memset(op, 0, sizeof(*op));
	op->fcp_req.cmdaddr = &op->cmd_iu;
	op->fcp_req.cmdlen = sizeof(op->cmd_iu);
	op->fcp_req.rspaddr = &op->rsp_iu;
	op->fcp_req.rsplen = sizeof(op->rsp_iu);
	op->fcp_req.done = nvme_fc_fcpio_done;
	op->ctrl = ctrl;
	op->queue = queue;
	op->rq = rq;
	op->rqno = rqno;

	cmdiu->format_id = NVME_CMD_FORMAT_ID;
	cmdiu->fc_id = NVME_CMD_FC_ID;
	cmdiu->iu_len = cpu_to_be16(sizeof(*cmdiu) / sizeof(u32));
	if (queue->qnum)
		cmdiu->rsv_cat = fccmnd_set_cat_css(0,
					(NVME_CC_CSS_NVM >> NVME_CC_CSS_SHIFT));
	else
		cmdiu->rsv_cat = fccmnd_set_cat_admin(0);

	op->fcp_req.cmddma = fc_dma_map_single(ctrl->lport->dev,
				&op->cmd_iu, sizeof(op->cmd_iu), DMA_TO_DEVICE);
	if (fc_dma_mapping_error(ctrl->lport->dev, op->fcp_req.cmddma)) {
		dev_err(ctrl->dev,
			"FCP Op failed - cmdiu dma mapping failed.\n");
		ret = -EFAULT;
		goto out_on_error;
	}

	op->fcp_req.rspdma = fc_dma_map_single(ctrl->lport->dev,
				&op->rsp_iu, sizeof(op->rsp_iu),
				DMA_FROM_DEVICE);
	if (fc_dma_mapping_error(ctrl->lport->dev, op->fcp_req.rspdma)) {
		dev_err(ctrl->dev,
			"FCP Op failed - rspiu dma mapping failed.\n");
		ret = -EFAULT;
	}

	atomic_set(&op->state, FCPOP_STATE_IDLE);
out_on_error:
	return ret;
}
static int
nvme_fc_init_request(struct blk_mq_tag_set *set, struct request *rq,
		unsigned int hctx_idx, unsigned int numa_node)
{
	struct nvme_fc_ctrl *ctrl = set->driver_data;
	struct nvme_fcp_op_w_sgl *op = blk_mq_rq_to_pdu(rq);
	int queue_idx = (set == &ctrl->tag_set) ? hctx_idx + 1 : 0;
	struct nvme_fc_queue *queue = &ctrl->queues[queue_idx];
	int res;

	res = __nvme_fc_init_request(ctrl, queue, &op->op, rq, queue->rqcnt++);
	if (res)
		return res;
	op->op.fcp_req.first_sgl = op->sgl;
	op->op.fcp_req.private = &op->priv[0];
	nvme_req(rq)->ctrl = &ctrl->ctrl;
	nvme_req(rq)->cmd = &op->op.cmd_iu.sqe;
	return res;
}
static int
nvme_fc_init_aen_ops(struct nvme_fc_ctrl *ctrl)
{
	struct nvme_fc_fcp_op *aen_op;
	struct nvme_fc_cmd_iu *cmdiu;
	struct nvme_command *sqe;
	void *private = NULL;
	int i, ret;

	aen_op = ctrl->aen_ops;
	for (i = 0; i < NVME_NR_AEN_COMMANDS; i++, aen_op++) {
		if (ctrl->lport->ops->fcprqst_priv_sz) {
			private = kzalloc(ctrl->lport->ops->fcprqst_priv_sz,
						GFP_KERNEL);
			if (!private)
				return -ENOMEM;
		}

		cmdiu = &aen_op->cmd_iu;
		sqe = &cmdiu->sqe;
		ret = __nvme_fc_init_request(ctrl, &ctrl->queues[0],
				aen_op, (struct request *)NULL,
				(NVME_AQ_BLK_MQ_DEPTH + i));
		if (ret) {
			kfree(private);
			return ret;
		}

		aen_op->flags = FCOP_FLAGS_AEN;
		aen_op->fcp_req.private = private;

		memset(sqe, 0, sizeof(*sqe));
		sqe->common.opcode = nvme_admin_async_event;
		/* Note: core layer may overwrite the sqe.command_id value */
		sqe->common.command_id = NVME_AQ_BLK_MQ_DEPTH + i;
	}
	return 0;
}
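/*
 * Note: AEN ops are built with a NULL struct request and rqno values
 * starting at NVME_AQ_BLK_MQ_DEPTH, i.e. just past the admin tag
 * space, so they never collide with tags of real admin commands.
 */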
static void
nvme_fc_term_aen_ops(struct nvme_fc_ctrl *ctrl)
{
	struct nvme_fc_fcp_op *aen_op;
	int i;

	cancel_work_sync(&ctrl->ctrl.async_event_work);
	aen_op = ctrl->aen_ops;
	for (i = 0; i < NVME_NR_AEN_COMMANDS; i++, aen_op++) {
		__nvme_fc_exit_request(ctrl, aen_op);

		kfree(aen_op->fcp_req.private);
		aen_op->fcp_req.private = NULL;
	}
}
static inline void
__nvme_fc_init_hctx(struct blk_mq_hw_ctx *hctx, struct nvme_fc_ctrl *ctrl,
		unsigned int qidx)
{
	struct nvme_fc_queue *queue = &ctrl->queues[qidx];

	hctx->driver_data = queue;
	queue->hctx = hctx;
}

static int
nvme_fc_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
		unsigned int hctx_idx)
{
	struct nvme_fc_ctrl *ctrl = data;

	__nvme_fc_init_hctx(hctx, ctrl, hctx_idx + 1);

	return 0;
}

static int
nvme_fc_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data,
		unsigned int hctx_idx)
{
	struct nvme_fc_ctrl *ctrl = data;

	__nvme_fc_init_hctx(hctx, ctrl, hctx_idx);

	return 0;
}
static void
nvme_fc_init_queue(struct nvme_fc_ctrl *ctrl, int idx)
{
	struct nvme_fc_queue *queue;

	queue = &ctrl->queues[idx];
	memset(queue, 0, sizeof(*queue));
	queue->ctrl = ctrl;
	queue->qnum = idx;
	atomic_set(&queue->csn, 0);
	queue->dev = ctrl->dev;

	if (idx > 0)
		queue->cmnd_capsule_len = ctrl->ctrl.ioccsz * 16;
	else
		queue->cmnd_capsule_len = sizeof(struct nvme_command);

	/*
	 * Considered whether we should allocate buffers for all SQEs
	 * and CQEs and dma map them - mapping their respective entries
	 * into the request structures (kernel vm addr and dma address)
	 * thus the driver could use the buffers/mappings directly.
	 * It only makes sense if the LLDD would use them for its
	 * messaging api. It's very unlikely most adapter api's would use
	 * a native NVME sqe/cqe. More reasonable if FC-NVME IU payload
	 * structures were used instead.
	 */
}
2252 * This routine terminates a queue at the transport level.
2253 * The transport has already ensured that all outstanding ios on
2254 * the queue have been terminated.
2255 * The transport will send a Disconnect LS request to terminate
2256 * the queue's connection. Termination of the admin queue will also
2257 * terminate the association at the target.
2260 nvme_fc_free_queue(struct nvme_fc_queue *queue)
2262 if (!test_and_clear_bit(NVME_FC_Q_CONNECTED, &queue->flags))
2265 clear_bit(NVME_FC_Q_LIVE, &queue->flags);
2267 * Current implementation never disconnects a single queue.
2268 * It always terminates a whole association. So there is never
2269 * a disconnect(queue) LS sent to the target.
2272 queue->connection_id = 0;
2273 atomic_set(&queue->csn, 0);
2277 __nvme_fc_delete_hw_queue(struct nvme_fc_ctrl *ctrl,
2278 struct nvme_fc_queue *queue, unsigned int qidx)
2280 if (ctrl->lport->ops->delete_queue)
2281 ctrl->lport->ops->delete_queue(&ctrl->lport->localport, qidx,
2282 queue->lldd_handle);
2283 queue->lldd_handle = NULL;
2287 nvme_fc_free_io_queues(struct nvme_fc_ctrl *ctrl)
2291 for (i = 1; i < ctrl->ctrl.queue_count; i++)
2292 nvme_fc_free_queue(&ctrl->queues[i]);
2296 __nvme_fc_create_hw_queue(struct nvme_fc_ctrl *ctrl,
2297 struct nvme_fc_queue *queue, unsigned int qidx, u16 qsize)
2301 queue->lldd_handle = NULL;
2302 if (ctrl->lport->ops->create_queue)
2303 ret = ctrl->lport->ops->create_queue(&ctrl->lport->localport,
2304 qidx, qsize, &queue->lldd_handle);
2310 nvme_fc_delete_hw_io_queues(struct nvme_fc_ctrl *ctrl)
2312 struct nvme_fc_queue *queue = &ctrl->queues[ctrl->ctrl.queue_count - 1];
2315 for (i = ctrl->ctrl.queue_count - 1; i >= 1; i--, queue--)
2316 __nvme_fc_delete_hw_queue(ctrl, queue, i);
2320 nvme_fc_create_hw_io_queues(struct nvme_fc_ctrl *ctrl, u16 qsize)
2322 struct nvme_fc_queue *queue = &ctrl->queues[1];
2325 for (i = 1; i < ctrl->ctrl.queue_count; i++, queue++) {
2326 ret = __nvme_fc_create_hw_queue(ctrl, queue, i, qsize);
2335 __nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[i], i);
2340 nvme_fc_connect_io_queues(struct nvme_fc_ctrl *ctrl, u16 qsize)
2344 for (i = 1; i < ctrl->ctrl.queue_count; i++) {
2345 ret = nvme_fc_connect_queue(ctrl, &ctrl->queues[i], qsize,
2349 ret = nvmf_connect_io_queue(&ctrl->ctrl, i);
2353 set_bit(NVME_FC_Q_LIVE, &ctrl->queues[i].flags);
2360 nvme_fc_init_io_queues(struct nvme_fc_ctrl *ctrl)
2364 for (i = 1; i < ctrl->ctrl.queue_count; i++)
2365 nvme_fc_init_queue(ctrl, i);
2369 nvme_fc_ctrl_free(struct kref *ref)
2371 struct nvme_fc_ctrl *ctrl =
2372 container_of(ref, struct nvme_fc_ctrl, ref);
2373 unsigned long flags;
2375 if (ctrl->ctrl.tagset) {
2376 blk_cleanup_queue(ctrl->ctrl.connect_q);
2377 blk_mq_free_tag_set(&ctrl->tag_set);
2380 /* remove from rport list */
2381 spin_lock_irqsave(&ctrl->rport->lock, flags);
2382 list_del(&ctrl->ctrl_list);
2383 spin_unlock_irqrestore(&ctrl->rport->lock, flags);
2385 blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
2386 blk_cleanup_queue(ctrl->ctrl.admin_q);
2387 blk_cleanup_queue(ctrl->ctrl.fabrics_q);
2388 blk_mq_free_tag_set(&ctrl->admin_tag_set);
2390 kfree(ctrl->queues);
2392 put_device(ctrl->dev);
2393 nvme_fc_rport_put(ctrl->rport);
2395 ida_simple_remove(&nvme_fc_ctrl_cnt, ctrl->cnum);
2396 if (ctrl->ctrl.opts)
2397 nvmf_free_options(ctrl->ctrl.opts);
2402 nvme_fc_ctrl_put(struct nvme_fc_ctrl *ctrl)
2404 kref_put(&ctrl->ref, nvme_fc_ctrl_free);
2408 nvme_fc_ctrl_get(struct nvme_fc_ctrl *ctrl)
2410 return kref_get_unless_zero(&ctrl->ref);
2414 * All accesses from nvme core layer done - can now free the
2415 * controller. Called after last nvme_put_ctrl() call
2418 nvme_fc_nvme_ctrl_freed(struct nvme_ctrl *nctrl)
2420 struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);
2422 WARN_ON(nctrl != &ctrl->ctrl);
2424 nvme_fc_ctrl_put(ctrl);
2428 * This routine is used by the transport when it needs to find active
2429 * io on a queue that is to be terminated. The transport uses
2430 * blk_mq_tagset_busy_iter() to find the busy requests, which then invoke
2431 * this routine to kill them one by one.
2433 * As FC allocates a FC exchange for each io, the transport must contact
2434 * the LLDD to terminate the exchange, thus releasing the FC exchange.
2435 * After terminating the exchange the LLDD will call the transport's
2436 * normal io done path for the request, but it will have an aborted
2437 * status. The done path will return the io request back to the block
2438 * layer with an error status.
2441 nvme_fc_terminate_exchange(struct request *req, void *data, bool reserved)
2443 struct nvme_ctrl *nctrl = data;
2444 struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);
2445 struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(req);
2447 op->nreq.flags |= NVME_REQ_CANCELLED;
2448 __nvme_fc_abort_op(ctrl, op);
2453 * This routine runs through all outstanding commands on the association
2454 * and aborts them. This routine is typically called by the
2455 * delete_association routine. It is also called due to an error during
2456 * reconnect. In that scenario, it is most likely a command that initializes
2457 * the controller, including fabric Connect commands on io queues, that
2458 * may have timed out or failed; the io must then be killed for the connect
2459 * thread to see the error.
2462 __nvme_fc_abort_outstanding_ios(struct nvme_fc_ctrl *ctrl, bool start_queues)
2467 * if aborting io, the queues are no longer good, mark them all as not live.
2470 if (ctrl->ctrl.queue_count > 1) {
2471 for (q = 1; q < ctrl->ctrl.queue_count; q++)
2472 clear_bit(NVME_FC_Q_LIVE, &ctrl->queues[q].flags);
2474 clear_bit(NVME_FC_Q_LIVE, &ctrl->queues[0].flags);
2477 * If io queues are present, stop them and terminate all outstanding
2478 * ios on them. As FC allocates a FC exchange for each io, the
2479 * transport must contact the LLDD to terminate the exchange,
2480 * thus releasing the FC exchange. We use blk_mq_tagset_busy_iter()
2481 * to tell us which ios are busy and invoke a transport routine
2482 * to kill them with the LLDD. After terminating the exchange
2483 * the LLDD will call the transport's normal io done path, but it
2484 * will have an aborted status. The done path will return the
2485 * io requests back to the block layer as part of normal completions
2486 * (but with error status).
2488 if (ctrl->ctrl.queue_count > 1) {
2489 nvme_stop_queues(&ctrl->ctrl);
2490 nvme_sync_io_queues(&ctrl->ctrl);
2491 blk_mq_tagset_busy_iter(&ctrl->tag_set,
2492 nvme_fc_terminate_exchange, &ctrl->ctrl);
2493 blk_mq_tagset_wait_completed_request(&ctrl->tag_set);
2495 nvme_start_queues(&ctrl->ctrl);
2499 * Other transports, which don't have link-level contexts bound
2500 * to sqes, would try to gracefully shut down the controller by
2501 * writing the registers for shutdown and polling (call
2502 * nvme_shutdown_ctrl()). Given a bunch of i/o was potentially
2503 * just aborted and we will wait on those contexts, and given
2504 * there was no indication of how live the controller is on the
2505 * link, don't send more io to create more contexts for the
2506 * shutdown. Let the controller fail via keepalive failure if
2507 * it's still present.
2511 * clean up the admin queue. Same thing as above.
2513 blk_mq_quiesce_queue(ctrl->ctrl.admin_q);
2514 blk_sync_queue(ctrl->ctrl.admin_q);
2515 blk_mq_tagset_busy_iter(&ctrl->admin_tag_set,
2516 nvme_fc_terminate_exchange, &ctrl->ctrl);
2517 blk_mq_tagset_wait_completed_request(&ctrl->admin_tag_set);
2521 nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg)
2524 * if an error (io timeout, etc) occurred while (re)connecting, if the
2525 * remote port requested termination of the association (disconnect_ls),
2526 * or if an error (timeout or abort) occurred on an io while creating
2527 * the controller: abort any ios on the association and let the
2528 * create_association error path resolve things.
2530 if (ctrl->ctrl.state == NVME_CTRL_CONNECTING) {
2531 __nvme_fc_abort_outstanding_ios(ctrl, true);
2532 set_bit(ASSOC_FAILED, &ctrl->flags);
2536 /* Otherwise, only proceed if in LIVE state - e.g. on first error */
2537 if (ctrl->ctrl.state != NVME_CTRL_LIVE)
2540 dev_warn(ctrl->ctrl.device,
2541 "NVME-FC{%d}: transport association event: %s\n",
2542 ctrl->cnum, errmsg);
2543 dev_warn(ctrl->ctrl.device,
2544 "NVME-FC{%d}: resetting controller\n", ctrl->cnum);
2546 nvme_reset_ctrl(&ctrl->ctrl);
2549 static enum blk_eh_timer_return
2550 nvme_fc_timeout(struct request *rq, bool reserved)
2552 struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
2553 struct nvme_fc_ctrl *ctrl = op->ctrl;
2554 struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu;
2555 struct nvme_command *sqe = &cmdiu->sqe;
2558 * Attempt to abort the offending command. Command completion
2559 * will detect the aborted io and will fail the connection.
2561 dev_info(ctrl->ctrl.device,
2562 "NVME-FC{%d.%d}: io timeout: opcode %d fctype %d w10/11: "
2564 ctrl->cnum, op->queue->qnum, sqe->common.opcode,
2565 sqe->connect.fctype, sqe->common.cdw10, sqe->common.cdw11);
2566 if (__nvme_fc_abort_op(ctrl, op))
2567 nvme_fc_error_recovery(ctrl, "io timeout abort failed");
2570 * the io abort has been initiated. Restart the reset timer; the
2571 * abort completion will complete the io shortly. This avoids a
2572 * synchronous wait while the abort finishes.
2574 return BLK_EH_RESET_TIMER;
2578 nvme_fc_map_data(struct nvme_fc_ctrl *ctrl, struct request *rq,
2579 struct nvme_fc_fcp_op *op)
2581 struct nvmefc_fcp_req *freq = &op->fcp_req;
2586 if (!blk_rq_nr_phys_segments(rq))
2589 freq->sg_table.sgl = freq->first_sgl;
2590 ret = sg_alloc_table_chained(&freq->sg_table,
2591 blk_rq_nr_phys_segments(rq), freq->sg_table.sgl,
2592 NVME_INLINE_SG_CNT);
2596 op->nents = blk_rq_map_sg(rq->q, rq, freq->sg_table.sgl);
2597 WARN_ON(op->nents > blk_rq_nr_phys_segments(rq));
2598 freq->sg_cnt = fc_dma_map_sg(ctrl->lport->dev, freq->sg_table.sgl,
2599 op->nents, rq_dma_dir(rq));
2600 if (unlikely(freq->sg_cnt <= 0)) {
2601 sg_free_table_chained(&freq->sg_table, NVME_INLINE_SG_CNT);
2607 * TODO: blk_integrity_rq(rq) for DIF
2613 nvme_fc_unmap_data(struct nvme_fc_ctrl *ctrl, struct request *rq,
2614 struct nvme_fc_fcp_op *op)
2616 struct nvmefc_fcp_req *freq = &op->fcp_req;
2621 fc_dma_unmap_sg(ctrl->lport->dev, freq->sg_table.sgl, op->nents,
2624 sg_free_table_chained(&freq->sg_table, NVME_INLINE_SG_CNT);
2630 * In FC, the queue is a logical thing. At transport connect, the target
2631 * creates its "queue" and returns a handle that is to be given to the
2632 * target whenever it posts something to the corresponding SQ. When an
2633 * SQE is sent on a SQ, FC effectively considers the SQE, or rather the
2634 * command contained within the SQE, an io, and assigns a FC exchange
2635 * to it. The SQE and the associated SQ handle are sent in the initial
2636 * CMD IU sent on the exchange. All transfers relative to the io occur
2637 * as part of the exchange. The CQE is the last thing for the io,
2638 * which is transferred (explicitly or implicitly) with the RSP IU
2639 * sent on the exchange. After the CQE is received, the FC exchange is
2640 * terminated and the exchange may be reused for a different io.
2642 * The transport-to-LLDD api has the transport issue a new fcp io
2643 * request to the LLDD. The LLDD then allocates a FC exchange
2644 * resource and transfers the command. The LLDD will then process all
2645 * steps to complete the io and, upon completion, invoke the transport's done routine.
2648 * So - while the operation is outstanding to the LLDD, there is a link
2649 * level FC exchange resource that is also outstanding. This must be
2650 * considered in all cleanup operations. (An illustrative LLDD-side sketch follows nvme_fc_start_fcp_op() below.)
2653 nvme_fc_start_fcp_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
2654 struct nvme_fc_fcp_op *op, u32 data_len,
2655 enum nvmefc_fcp_datadir io_dir)
2657 struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu;
2658 struct nvme_command *sqe = &cmdiu->sqe;
2662 * before attempting to send the io, check to see if we believe
2663 * the target device is present
2665 if (ctrl->rport->remoteport.port_state != FC_OBJSTATE_ONLINE)
2666 return BLK_STS_RESOURCE;
2668 if (!nvme_fc_ctrl_get(ctrl))
2669 return BLK_STS_IOERR;
2671 /* format the FC-NVME CMD IU and fcp_req */
2672 cmdiu->connection_id = cpu_to_be64(queue->connection_id);
2673 cmdiu->data_len = cpu_to_be32(data_len);
2675 case NVMEFC_FCP_WRITE:
2676 cmdiu->flags = FCNVME_CMD_FLAGS_WRITE;
2678 case NVMEFC_FCP_READ:
2679 cmdiu->flags = FCNVME_CMD_FLAGS_READ;
2681 case NVMEFC_FCP_NODATA:
2685 op->fcp_req.payload_length = data_len;
2686 op->fcp_req.io_dir = io_dir;
2687 op->fcp_req.transferred_length = 0;
2688 op->fcp_req.rcv_rsplen = 0;
2689 op->fcp_req.status = NVME_SC_SUCCESS;
2690 op->fcp_req.sqid = cpu_to_le16(queue->qnum);
2693 * validate per fabric rules, set fields mandated by fabric spec
2694 * as well as those by FC-NVME spec.
2696 WARN_ON_ONCE(sqe->common.metadata);
2697 sqe->common.flags |= NVME_CMD_SGL_METABUF;
2700 * format SQE DPTR field per FC-NVME rules:
2701 * type=0x5 Transport SGL Data Block Descriptor
2702 * subtype=0xA Transport-specific value
2703 * address=0
2704 * length=length of the data series
2706 sqe->rw.dptr.sgl.type = (NVME_TRANSPORT_SGL_DATA_DESC << 4) |
2707 NVME_SGL_FMT_TRANSPORT_A;
2708 sqe->rw.dptr.sgl.length = cpu_to_le32(data_len);
2709 sqe->rw.dptr.sgl.addr = 0;
2711 if (!(op->flags & FCOP_FLAGS_AEN)) {
2712 ret = nvme_fc_map_data(ctrl, op->rq, op);
2714 nvme_cleanup_cmd(op->rq);
2715 nvme_fc_ctrl_put(ctrl);
2716 if (ret == -ENOMEM || ret == -EAGAIN)
2717 return BLK_STS_RESOURCE;
2718 return BLK_STS_IOERR;
2722 fc_dma_sync_single_for_device(ctrl->lport->dev, op->fcp_req.cmddma,
2723 sizeof(op->cmd_iu), DMA_TO_DEVICE);
2725 atomic_set(&op->state, FCPOP_STATE_ACTIVE);
2727 if (!(op->flags & FCOP_FLAGS_AEN))
2728 blk_mq_start_request(op->rq);
2730 cmdiu->csn = cpu_to_be32(atomic_inc_return(&queue->csn));
2731 ret = ctrl->lport->ops->fcp_io(&ctrl->lport->localport,
2732 &ctrl->rport->remoteport,
2733 queue->lldd_handle, &op->fcp_req);
2737 * If the lld fails to send the command, is there an issue with
2738 * the csn value? If the command that fails is the Connect,
2739 * no - as the connection won't be live. If it is a command
2740 * post-connect, it's possible a gap in csn may be created.
2741 * Does this matter? As Linux initiators don't send fused
2742 * commands, no. The gap would exist, but as there's nothing
2743 * that depends on csn order to be delivered on the target
2744 * side, it shouldn't hurt. It would be difficult for a
2745 * target to even detect the csn gap as it has no idea when the
2746 * cmd with the csn was supposed to arrive.
2748 opstate = atomic_xchg(&op->state, FCPOP_STATE_COMPLETE);
2749 __nvme_fc_fcpop_chk_teardowns(ctrl, op, opstate);
2751 if (!(op->flags & FCOP_FLAGS_AEN)) {
2752 nvme_fc_unmap_data(ctrl, op->rq, op);
2753 nvme_cleanup_cmd(op->rq);
2756 nvme_fc_ctrl_put(ctrl);
2758 if (ctrl->rport->remoteport.port_state == FC_OBJSTATE_ONLINE &&
2760 return BLK_STS_IOERR;
2762 return BLK_STS_RESOURCE;
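/*
 * Illustrative sketch only - not part of this driver. This shows the
 * general shape of the LLDD ->fcp_io() handler that
 * nvme_fc_start_fcp_op() invokes through ctrl->lport->ops->fcp_io().
 * The example_* types and helpers are hypothetical stand-ins for an
 * LLDD's own queue and exchange management.
 */
#if 0	/* example only - never compiled */
static int example_lldd_fcp_io(struct nvme_fc_local_port *localport,
		struct nvme_fc_remote_port *remoteport,
		void *hw_queue_handle, struct nvmefc_fcp_req *fcp_req)
{
	struct example_hw_queue *hwq = hw_queue_handle;	/* hypothetical */
	struct example_exchange *xchg;

	/* allocate the link-level FC exchange that will carry this io */
	xchg = example_alloc_exchange(hwq);		/* hypothetical */
	if (!xchg)
		return -EBUSY;	/* transport maps nonzero to BLK_STS_IOERR/RESOURCE */

	/*
	 * Send the CMD IU (fcp_req->cmdaddr/cmddma, fcp_req->cmdlen) on
	 * the exchange; data transfers and the RSP IU follow on the same
	 * exchange. When the io finishes, the LLDD fills in
	 * fcp_req->status, fcp_req->transferred_length and
	 * fcp_req->rcv_rsplen, then calls fcp_req->done(fcp_req) to
	 * enter the transport's io done path.
	 */
	return example_send_cmd_iu(xchg, fcp_req);	/* hypothetical */
}
#endif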
2769 nvme_fc_queue_rq(struct blk_mq_hw_ctx *hctx,
2770 const struct blk_mq_queue_data *bd)
2772 struct nvme_ns *ns = hctx->queue->queuedata;
2773 struct nvme_fc_queue *queue = hctx->driver_data;
2774 struct nvme_fc_ctrl *ctrl = queue->ctrl;
2775 struct request *rq = bd->rq;
2776 struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
2777 enum nvmefc_fcp_datadir io_dir;
2778 bool queue_ready = test_bit(NVME_FC_Q_LIVE, &queue->flags);
2782 if (ctrl->rport->remoteport.port_state != FC_OBJSTATE_ONLINE ||
2783 !nvme_check_ready(&queue->ctrl->ctrl, rq, queue_ready))
2784 return nvme_fail_nonready_command(&queue->ctrl->ctrl, rq);
2786 ret = nvme_setup_cmd(ns, rq);
2791 * nvme core doesn't quite treat the rq opaquely. Commands such
2792 * as WRITE ZEROES will return a non-zero rq payload_bytes yet
2793 * there is no actual payload to be transferred.
2794 * To get it right, key data transmission on there being 1 or
2795 * more physical segments in the sg list. If there are no
2796 * physical segments, there is no payload.
2798 if (blk_rq_nr_phys_segments(rq)) {
2799 data_len = blk_rq_payload_bytes(rq);
2800 io_dir = ((rq_data_dir(rq) == WRITE) ?
2801 NVMEFC_FCP_WRITE : NVMEFC_FCP_READ);
2804 io_dir = NVMEFC_FCP_NODATA;
2808 return nvme_fc_start_fcp_op(ctrl, queue, op, data_len, io_dir);
2812 nvme_fc_submit_async_event(struct nvme_ctrl *arg)
2814 struct nvme_fc_ctrl *ctrl = to_fc_ctrl(arg);
2815 struct nvme_fc_fcp_op *aen_op;
2818 if (test_bit(FCCTRL_TERMIO, &ctrl->flags))
2821 aen_op = &ctrl->aen_ops[0];
2823 ret = nvme_fc_start_fcp_op(ctrl, aen_op->queue, aen_op, 0,
2826 dev_err(ctrl->ctrl.device,
2827 "failed async event work\n");
2831 nvme_fc_complete_rq(struct request *rq)
2833 struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
2834 struct nvme_fc_ctrl *ctrl = op->ctrl;
2836 atomic_set(&op->state, FCPOP_STATE_IDLE);
2837 op->flags &= ~FCOP_FLAGS_TERMIO;
2839 nvme_fc_unmap_data(ctrl, rq, op);
2840 nvme_complete_rq(rq);
2841 nvme_fc_ctrl_put(ctrl);
2845 static const struct blk_mq_ops nvme_fc_mq_ops = {
2846 .queue_rq = nvme_fc_queue_rq,
2847 .complete = nvme_fc_complete_rq,
2848 .init_request = nvme_fc_init_request,
2849 .exit_request = nvme_fc_exit_request,
2850 .init_hctx = nvme_fc_init_hctx,
2851 .timeout = nvme_fc_timeout,
2855 nvme_fc_create_io_queues(struct nvme_fc_ctrl *ctrl)
2857 struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
2858 unsigned int nr_io_queues;
2861 nr_io_queues = min(min(opts->nr_io_queues, num_online_cpus()),
2862 ctrl->lport->ops->max_hw_queues);
2863 ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues);
2865 dev_info(ctrl->ctrl.device,
2866 "set_queue_count failed: %d\n", ret);
2870 ctrl->ctrl.queue_count = nr_io_queues + 1;
2874 nvme_fc_init_io_queues(ctrl);
2876 memset(&ctrl->tag_set, 0, sizeof(ctrl->tag_set));
2877 ctrl->tag_set.ops = &nvme_fc_mq_ops;
2878 ctrl->tag_set.queue_depth = ctrl->ctrl.opts->queue_size;
2879 ctrl->tag_set.reserved_tags = NVMF_RESERVED_TAGS;
2880 ctrl->tag_set.numa_node = ctrl->ctrl.numa_node;
2881 ctrl->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
2882 ctrl->tag_set.cmd_size =
2883 struct_size((struct nvme_fcp_op_w_sgl *)NULL, priv,
2884 ctrl->lport->ops->fcprqst_priv_sz);
2885 ctrl->tag_set.driver_data = ctrl;
2886 ctrl->tag_set.nr_hw_queues = ctrl->ctrl.queue_count - 1;
2887 ctrl->tag_set.timeout = NVME_IO_TIMEOUT;
2889 ret = blk_mq_alloc_tag_set(&ctrl->tag_set);
2893 ctrl->ctrl.tagset = &ctrl->tag_set;
2895 ctrl->ctrl.connect_q = blk_mq_init_queue(&ctrl->tag_set);
2896 if (IS_ERR(ctrl->ctrl.connect_q)) {
2897 ret = PTR_ERR(ctrl->ctrl.connect_q);
2898 goto out_free_tag_set;
2901 ret = nvme_fc_create_hw_io_queues(ctrl, ctrl->ctrl.sqsize + 1);
2903 goto out_cleanup_blk_queue;
2905 ret = nvme_fc_connect_io_queues(ctrl, ctrl->ctrl.sqsize + 1);
2907 goto out_delete_hw_queues;
2909 ctrl->ioq_live = true;
2913 out_delete_hw_queues:
2914 nvme_fc_delete_hw_io_queues(ctrl);
2915 out_cleanup_blk_queue:
2916 blk_cleanup_queue(ctrl->ctrl.connect_q);
2918 blk_mq_free_tag_set(&ctrl->tag_set);
2919 nvme_fc_free_io_queues(ctrl);
2921 /* force put free routine to ignore io queues */
2922 ctrl->ctrl.tagset = NULL;
2928 nvme_fc_recreate_io_queues(struct nvme_fc_ctrl *ctrl)
2930 struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
2931 u32 prior_ioq_cnt = ctrl->ctrl.queue_count - 1;
2932 unsigned int nr_io_queues;
2935 nr_io_queues = min(min(opts->nr_io_queues, num_online_cpus()),
2936 ctrl->lport->ops->max_hw_queues);
2937 ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues);
2939 dev_info(ctrl->ctrl.device,
2940 "set_queue_count failed: %d\n", ret);
2944 if (!nr_io_queues && prior_ioq_cnt) {
2945 dev_info(ctrl->ctrl.device,
2946 "Fail Reconnect: At least 1 io queue "
2947 "required (was %d)\n", prior_ioq_cnt);
2951 ctrl->ctrl.queue_count = nr_io_queues + 1;
2952 /* check for io queues existing */
2953 if (ctrl->ctrl.queue_count == 1)
2956 if (prior_ioq_cnt != nr_io_queues) {
2957 dev_info(ctrl->ctrl.device,
2958 "reconnect: revising io queue count from %d to %d\n",
2959 prior_ioq_cnt, nr_io_queues);
2960 blk_mq_update_nr_hw_queues(&ctrl->tag_set, nr_io_queues);
2963 ret = nvme_fc_create_hw_io_queues(ctrl, ctrl->ctrl.sqsize + 1);
2965 goto out_free_io_queues;
2967 ret = nvme_fc_connect_io_queues(ctrl, ctrl->ctrl.sqsize + 1);
2969 goto out_delete_hw_queues;
2973 out_delete_hw_queues:
2974 nvme_fc_delete_hw_io_queues(ctrl);
2976 nvme_fc_free_io_queues(ctrl);
2981 nvme_fc_rport_active_on_lport(struct nvme_fc_rport *rport)
2983 struct nvme_fc_lport *lport = rport->lport;
2985 atomic_inc(&lport->act_rport_cnt);
2989 nvme_fc_rport_inactive_on_lport(struct nvme_fc_rport *rport)
2991 struct nvme_fc_lport *lport = rport->lport;
2994 cnt = atomic_dec_return(&lport->act_rport_cnt);
2995 if (cnt == 0 && lport->localport.port_state == FC_OBJSTATE_DELETED)
2996 lport->ops->localport_delete(&lport->localport);
3000 nvme_fc_ctlr_active_on_rport(struct nvme_fc_ctrl *ctrl)
3002 struct nvme_fc_rport *rport = ctrl->rport;
3005 if (test_and_set_bit(ASSOC_ACTIVE, &ctrl->flags))
3008 cnt = atomic_inc_return(&rport->act_ctrl_cnt);
3010 nvme_fc_rport_active_on_lport(rport);
3016 nvme_fc_ctlr_inactive_on_rport(struct nvme_fc_ctrl *ctrl)
3018 struct nvme_fc_rport *rport = ctrl->rport;
3019 struct nvme_fc_lport *lport = rport->lport;
3022 /* clearing of ctrl->flags ASSOC_ACTIVE bit is in association delete */
3024 cnt = atomic_dec_return(&rport->act_ctrl_cnt);
3026 if (rport->remoteport.port_state == FC_OBJSTATE_DELETED)
3027 lport->ops->remoteport_delete(&rport->remoteport);
3028 nvme_fc_rport_inactive_on_lport(rport);
3035 * This routine restarts the controller on the host side, and
3036 * on the link side, recreates the controller association.
3039 nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
3041 struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
3042 struct nvmefc_ls_rcv_op *disls = NULL;
3043 unsigned long flags;
3047 ++ctrl->ctrl.nr_reconnects;
3049 if (ctrl->rport->remoteport.port_state != FC_OBJSTATE_ONLINE)
3052 if (nvme_fc_ctlr_active_on_rport(ctrl))
3055 dev_info(ctrl->ctrl.device,
3056 "NVME-FC{%d}: create association : host wwpn 0x%016llx "
3057 " rport wwpn 0x%016llx: NQN \"%s\"\n",
3058 ctrl->cnum, ctrl->lport->localport.port_name,
3059 ctrl->rport->remoteport.port_name, ctrl->ctrl.opts->subsysnqn);
3061 clear_bit(ASSOC_FAILED, &ctrl->flags);
3064 * Create the admin queue
3067 ret = __nvme_fc_create_hw_queue(ctrl, &ctrl->queues[0], 0,
3070 goto out_free_queue;
3072 ret = nvme_fc_connect_admin_queue(ctrl, &ctrl->queues[0],
3073 NVME_AQ_DEPTH, (NVME_AQ_DEPTH / 4));
3075 goto out_delete_hw_queue;
3077 ret = nvmf_connect_admin_queue(&ctrl->ctrl);
3079 goto out_disconnect_admin_queue;
3081 set_bit(NVME_FC_Q_LIVE, &ctrl->queues[0].flags);
3084 * Check controller capabilities
3086 * todo: add code to check if ctrl attributes changed from
3087 * prior connection values
3090 ret = nvme_enable_ctrl(&ctrl->ctrl);
3091 if (ret || test_bit(ASSOC_FAILED, &ctrl->flags))
3092 goto out_disconnect_admin_queue;
3094 ctrl->ctrl.max_segments = ctrl->lport->ops->max_sgl_segments;
3095 ctrl->ctrl.max_hw_sectors = ctrl->ctrl.max_segments <<
3098 blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
3100 ret = nvme_init_ctrl_finish(&ctrl->ctrl);
3101 if (ret || test_bit(ASSOC_FAILED, &ctrl->flags))
3102 goto out_disconnect_admin_queue;
3106 /* FC-NVME does not have other data in the capsule */
3107 if (ctrl->ctrl.icdoff) {
3108 dev_err(ctrl->ctrl.device, "icdoff %d is not supported!\n",
3110 ret = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
3111 goto out_disconnect_admin_queue;
3114 /* FC-NVME supports normal SGL Data Block Descriptors */
3115 if (!nvme_ctrl_sgl_supported(&ctrl->ctrl)) {
3116 dev_err(ctrl->ctrl.device,
3117 "Mandatory sgls are not supported!\n");
3118 ret = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
3119 goto out_disconnect_admin_queue;
3122 if (opts->queue_size > ctrl->ctrl.maxcmd) {
3123 /* warn if maxcmd is lower than queue_size */
3124 dev_warn(ctrl->ctrl.device,
3125 "queue_size %zu > ctrl maxcmd %u, reducing "
3127 opts->queue_size, ctrl->ctrl.maxcmd);
3128 opts->queue_size = ctrl->ctrl.maxcmd;
3131 if (opts->queue_size > ctrl->ctrl.sqsize + 1) {
3132 /* warn if sqsize is lower than queue_size */
3133 dev_warn(ctrl->ctrl.device,
3134 "queue_size %zu > ctrl sqsize %u, reducing "
3136 opts->queue_size, ctrl->ctrl.sqsize + 1);
3137 opts->queue_size = ctrl->ctrl.sqsize + 1;
3140 ret = nvme_fc_init_aen_ops(ctrl);
3142 goto out_term_aen_ops;
3145 * Create the io queues
3148 if (ctrl->ctrl.queue_count > 1) {
3149 if (!ctrl->ioq_live)
3150 ret = nvme_fc_create_io_queues(ctrl);
3152 ret = nvme_fc_recreate_io_queues(ctrl);
3154 if (ret || test_bit(ASSOC_FAILED, &ctrl->flags))
3155 goto out_term_aen_ops;
3157 changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
3159 ctrl->ctrl.nr_reconnects = 0;
3162 nvme_start_ctrl(&ctrl->ctrl);
3164 return 0; /* Success */
3167 nvme_fc_term_aen_ops(ctrl);
3168 out_disconnect_admin_queue:
3169 /* send a Disconnect(association) LS to fc-nvme target */
3170 nvme_fc_xmt_disconnect_assoc(ctrl);
3171 spin_lock_irqsave(&ctrl->lock, flags);
3172 ctrl->association_id = 0;
3173 disls = ctrl->rcv_disconn;
3174 ctrl->rcv_disconn = NULL;
3175 spin_unlock_irqrestore(&ctrl->lock, flags);
3177 nvme_fc_xmt_ls_rsp(disls);
3178 out_delete_hw_queue:
3179 __nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[0], 0);
3181 nvme_fc_free_queue(&ctrl->queues[0]);
3182 clear_bit(ASSOC_ACTIVE, &ctrl->flags);
3183 nvme_fc_ctlr_inactive_on_rport(ctrl);
3190 * This routine stops operation of the controller on the host side.
3191 * On the host OS stack side: Admin and IO queues are stopped,
3192 * outstanding ios on them terminated via FC ABTS.
3193 * On the link side: the association is terminated.
3196 nvme_fc_delete_association(struct nvme_fc_ctrl *ctrl)
3198 struct nvmefc_ls_rcv_op *disls = NULL;
3199 unsigned long flags;
3201 if (!test_and_clear_bit(ASSOC_ACTIVE, &ctrl->flags))
3204 spin_lock_irqsave(&ctrl->lock, flags);
3205 set_bit(FCCTRL_TERMIO, &ctrl->flags);
3207 spin_unlock_irqrestore(&ctrl->lock, flags);
3209 __nvme_fc_abort_outstanding_ios(ctrl, false);
3211 /* kill the aens as they are a separate path */
3212 nvme_fc_abort_aen_ops(ctrl);
3214 /* wait for all io that had to be aborted */
3215 spin_lock_irq(&ctrl->lock);
3216 wait_event_lock_irq(ctrl->ioabort_wait, ctrl->iocnt == 0, ctrl->lock);
3217 clear_bit(FCCTRL_TERMIO, &ctrl->flags);
3218 spin_unlock_irq(&ctrl->lock);
3220 nvme_fc_term_aen_ops(ctrl);
3223 * send a Disconnect(association) LS to fc-nvme target
3224 * Note: could have been sent at top of process, but
3225 * cleaner on link traffic if after the aborts complete.
3226 * Note: if association doesn't exist, association_id will be 0
3228 if (ctrl->association_id)
3229 nvme_fc_xmt_disconnect_assoc(ctrl);
3231 spin_lock_irqsave(&ctrl->lock, flags);
3232 ctrl->association_id = 0;
3233 disls = ctrl->rcv_disconn;
3234 ctrl->rcv_disconn = NULL;
3235 spin_unlock_irqrestore(&ctrl->lock, flags);
3238 * if a Disconnect Request was waiting for a response, send it
3239 * now that all ABTS's have been issued (and are complete).
3241 nvme_fc_xmt_ls_rsp(disls);
3243 if (ctrl->ctrl.tagset) {
3244 nvme_fc_delete_hw_io_queues(ctrl);
3245 nvme_fc_free_io_queues(ctrl);
3248 __nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[0], 0);
3249 nvme_fc_free_queue(&ctrl->queues[0]);
3251 /* re-enable the admin_q so anything new can fast fail */
3252 blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
3254 /* resume the io queues so that things will fast fail */
3255 nvme_start_queues(&ctrl->ctrl);
3257 nvme_fc_ctlr_inactive_on_rport(ctrl);
3261 nvme_fc_delete_ctrl(struct nvme_ctrl *nctrl)
3263 struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);
3265 cancel_work_sync(&ctrl->ioerr_work);
3266 cancel_delayed_work_sync(&ctrl->connect_work);
3268 * kill the association on the link side. this will block
3269 * waiting for io to terminate
3271 nvme_fc_delete_association(ctrl);
3275 nvme_fc_reconnect_or_delete(struct nvme_fc_ctrl *ctrl, int status)
3277 struct nvme_fc_rport *rport = ctrl->rport;
3278 struct nvme_fc_remote_port *portptr = &rport->remoteport;
3279 unsigned long recon_delay = ctrl->ctrl.opts->reconnect_delay * HZ;
3282 if (ctrl->ctrl.state != NVME_CTRL_CONNECTING)
3285 if (portptr->port_state == FC_OBJSTATE_ONLINE) {
3286 dev_info(ctrl->ctrl.device,
3287 "NVME-FC{%d}: reset: Reconnect attempt failed (%d)\n",
3288 ctrl->cnum, status);
3289 if (status > 0 && (status & NVME_SC_DNR))
3291 } else if (time_after_eq(jiffies, rport->dev_loss_end))
3294 if (recon && nvmf_should_reconnect(&ctrl->ctrl)) {
3295 if (portptr->port_state == FC_OBJSTATE_ONLINE)
3296 dev_info(ctrl->ctrl.device,
3297 "NVME-FC{%d}: Reconnect attempt in %ld "
3299 ctrl->cnum, recon_delay / HZ);
3300 else if (time_after(jiffies + recon_delay, rport->dev_loss_end))
3301 recon_delay = rport->dev_loss_end - jiffies;
3303 queue_delayed_work(nvme_wq, &ctrl->connect_work, recon_delay);
3305 if (portptr->port_state == FC_OBJSTATE_ONLINE) {
3306 if (status > 0 && (status & NVME_SC_DNR))
3307 dev_warn(ctrl->ctrl.device,
3308 "NVME-FC{%d}: reconnect failure\n",
3311 dev_warn(ctrl->ctrl.device,
3312 "NVME-FC{%d}: Max reconnect attempts "
3314 ctrl->cnum, ctrl->ctrl.nr_reconnects);
3316 dev_warn(ctrl->ctrl.device,
3317 "NVME-FC{%d}: dev_loss_tmo (%d) expired "
3318 "while waiting for remoteport connectivity.\n",
3319 ctrl->cnum, min_t(int, portptr->dev_loss_tmo,
3320 (ctrl->ctrl.opts->max_reconnects *
3321 ctrl->ctrl.opts->reconnect_delay)));
3322 WARN_ON(nvme_delete_ctrl(&ctrl->ctrl));
3327 nvme_fc_reset_ctrl_work(struct work_struct *work)
3329 struct nvme_fc_ctrl *ctrl =
3330 container_of(work, struct nvme_fc_ctrl, ctrl.reset_work);
3332 nvme_stop_ctrl(&ctrl->ctrl);
3334 /* will block while waiting for io to terminate */
3335 nvme_fc_delete_association(ctrl);
3337 if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING))
3338 dev_err(ctrl->ctrl.device,
3339 "NVME-FC{%d}: error_recovery: Couldn't change state "
3340 "to CONNECTING\n", ctrl->cnum);
3342 if (ctrl->rport->remoteport.port_state == FC_OBJSTATE_ONLINE) {
3343 if (!queue_delayed_work(nvme_wq, &ctrl->connect_work, 0)) {
3344 dev_err(ctrl->ctrl.device,
3345 "NVME-FC{%d}: failed to schedule connect "
3346 "after reset\n", ctrl->cnum);
3348 flush_delayed_work(&ctrl->connect_work);
3351 nvme_fc_reconnect_or_delete(ctrl, -ENOTCONN);
3356 static const struct nvme_ctrl_ops nvme_fc_ctrl_ops = {
3358 .module = THIS_MODULE,
3359 .flags = NVME_F_FABRICS,
3360 .reg_read32 = nvmf_reg_read32,
3361 .reg_read64 = nvmf_reg_read64,
3362 .reg_write32 = nvmf_reg_write32,
3363 .free_ctrl = nvme_fc_nvme_ctrl_freed,
3364 .submit_async_event = nvme_fc_submit_async_event,
3365 .delete_ctrl = nvme_fc_delete_ctrl,
3366 .get_address = nvmf_get_address,
3370 nvme_fc_connect_ctrl_work(struct work_struct *work)
3374 struct nvme_fc_ctrl *ctrl =
3375 container_of(to_delayed_work(work),
3376 struct nvme_fc_ctrl, connect_work);
3378 ret = nvme_fc_create_association(ctrl);
3380 nvme_fc_reconnect_or_delete(ctrl, ret);
3382 dev_info(ctrl->ctrl.device,
3383 "NVME-FC{%d}: controller connect complete\n",
3388 static const struct blk_mq_ops nvme_fc_admin_mq_ops = {
3389 .queue_rq = nvme_fc_queue_rq,
3390 .complete = nvme_fc_complete_rq,
3391 .init_request = nvme_fc_init_request,
3392 .exit_request = nvme_fc_exit_request,
3393 .init_hctx = nvme_fc_init_admin_hctx,
3394 .timeout = nvme_fc_timeout,
3399 * Fails a controller request if it matches an existing controller
3400 * (association) with the same tuple:
3401 * <Host NQN, Host ID, local FC port, remote FC port, SUBSYS NQN>
3403 * The ports don't need to be compared as they are intrinsically
3404 * already matched by the port pointers supplied.
3407 nvme_fc_existing_controller(struct nvme_fc_rport *rport,
3408 struct nvmf_ctrl_options *opts)
3410 struct nvme_fc_ctrl *ctrl;
3411 unsigned long flags;
3414 spin_lock_irqsave(&rport->lock, flags);
3415 list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list) {
3416 found = nvmf_ctlr_matches_baseopts(&ctrl->ctrl, opts);
3420 spin_unlock_irqrestore(&rport->lock, flags);
3425 static struct nvme_ctrl *
3426 nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
3427 struct nvme_fc_lport *lport, struct nvme_fc_rport *rport)
3429 struct nvme_fc_ctrl *ctrl;
3430 unsigned long flags;
3431 int ret, idx, ctrl_loss_tmo;
3433 if (!(rport->remoteport.port_role &
3434 (FC_PORT_ROLE_NVME_DISCOVERY | FC_PORT_ROLE_NVME_TARGET))) {
3439 if (!opts->duplicate_connect &&
3440 nvme_fc_existing_controller(rport, opts)) {
3445 ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
3451 idx = ida_simple_get(&nvme_fc_ctrl_cnt, 0, 0, GFP_KERNEL);
3458 * if ctrl_loss_tmo is being enforced and the default reconnect delay
3459 * is being used, change to a shorter reconnect delay for FC.
3461 if (opts->max_reconnects != -1 &&
3462 opts->reconnect_delay == NVMF_DEF_RECONNECT_DELAY &&
3463 opts->reconnect_delay > NVME_FC_DEFAULT_RECONNECT_TMO) {
3464 ctrl_loss_tmo = opts->max_reconnects * opts->reconnect_delay;
3465 opts->reconnect_delay = NVME_FC_DEFAULT_RECONNECT_TMO;
3466 opts->max_reconnects = DIV_ROUND_UP(ctrl_loss_tmo,
3467 opts->reconnect_delay);
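/*
 * Worked example (illustrative, assuming the usual fabrics defaults of
 * reconnect_delay = 10s and ctrl_loss_tmo = 600s): the options arrive
 * here as max_reconnects = 60. The delay is shortened to
 * NVME_FC_DEFAULT_RECONNECT_TMO (2s), giving:
 *
 *	ctrl_loss_tmo  = 60 * 10 = 600 seconds (unchanged)
 *	max_reconnects = DIV_ROUND_UP(600, 2) = 300
 *
 * so the overall controller-loss window is preserved while reconnect
 * attempts are made more frequently.
 */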
3470 ctrl->ctrl.opts = opts;
3471 ctrl->ctrl.nr_reconnects = 0;
3473 ctrl->ctrl.numa_node = dev_to_node(lport->dev);
3475 ctrl->ctrl.numa_node = NUMA_NO_NODE;
3476 INIT_LIST_HEAD(&ctrl->ctrl_list);
3477 ctrl->lport = lport;
3478 ctrl->rport = rport;
3479 ctrl->dev = lport->dev;
3481 ctrl->ioq_live = false;
3482 init_waitqueue_head(&ctrl->ioabort_wait);
3484 get_device(ctrl->dev);
3485 kref_init(&ctrl->ref);
3487 INIT_WORK(&ctrl->ctrl.reset_work, nvme_fc_reset_ctrl_work);
3488 INIT_DELAYED_WORK(&ctrl->connect_work, nvme_fc_connect_ctrl_work);
3489 INIT_WORK(&ctrl->ioerr_work, nvme_fc_ctrl_ioerr_work);
3490 spin_lock_init(&ctrl->lock);
3492 /* io queue count */
3493 ctrl->ctrl.queue_count = min_t(unsigned int,
3495 lport->ops->max_hw_queues);
3496 ctrl->ctrl.queue_count++; /* +1 for admin queue */
3498 ctrl->ctrl.sqsize = opts->queue_size - 1;
3499 ctrl->ctrl.kato = opts->kato;
3500 ctrl->ctrl.cntlid = 0xffff;
3503 ctrl->queues = kcalloc(ctrl->ctrl.queue_count,
3504 sizeof(struct nvme_fc_queue), GFP_KERNEL);
3508 nvme_fc_init_queue(ctrl, 0);
3510 memset(&ctrl->admin_tag_set, 0, sizeof(ctrl->admin_tag_set));
3511 ctrl->admin_tag_set.ops = &nvme_fc_admin_mq_ops;
3512 ctrl->admin_tag_set.queue_depth = NVME_AQ_MQ_TAG_DEPTH;
3513 ctrl->admin_tag_set.reserved_tags = NVMF_RESERVED_TAGS;
3514 ctrl->admin_tag_set.numa_node = ctrl->ctrl.numa_node;
3515 ctrl->admin_tag_set.cmd_size =
3516 struct_size((struct nvme_fcp_op_w_sgl *)NULL, priv,
3517 ctrl->lport->ops->fcprqst_priv_sz);
3518 ctrl->admin_tag_set.driver_data = ctrl;
3519 ctrl->admin_tag_set.nr_hw_queues = 1;
3520 ctrl->admin_tag_set.timeout = NVME_ADMIN_TIMEOUT;
3521 ctrl->admin_tag_set.flags = BLK_MQ_F_NO_SCHED;
3523 ret = blk_mq_alloc_tag_set(&ctrl->admin_tag_set);
3525 goto out_free_queues;
3526 ctrl->ctrl.admin_tagset = &ctrl->admin_tag_set;
3528 ctrl->ctrl.fabrics_q = blk_mq_init_queue(&ctrl->admin_tag_set);
3529 if (IS_ERR(ctrl->ctrl.fabrics_q)) {
3530 ret = PTR_ERR(ctrl->ctrl.fabrics_q);
3531 goto out_free_admin_tag_set;
3534 ctrl->ctrl.admin_q = blk_mq_init_queue(&ctrl->admin_tag_set);
3535 if (IS_ERR(ctrl->ctrl.admin_q)) {
3536 ret = PTR_ERR(ctrl->ctrl.admin_q);
3537 goto out_cleanup_fabrics_q;
3541 * Would have been nice to init the io queue tag set as well.
3542 * However, we require interaction from the controller
3543 * for max io queue count before we can do so.
3544 * Defer this to the connect path.
3547 ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_fc_ctrl_ops, 0);
3549 goto out_cleanup_admin_q;
3551 /* at this point, teardown path changes to ref counting on nvme ctrl */
3553 spin_lock_irqsave(&rport->lock, flags);
3554 list_add_tail(&ctrl->ctrl_list, &rport->ctrl_list);
3555 spin_unlock_irqrestore(&rport->lock, flags);
3557 if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RESETTING) ||
3558 !nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
3559 dev_err(ctrl->ctrl.device,
3560 "NVME-FC{%d}: failed to init ctrl state\n", ctrl->cnum);
3564 if (!queue_delayed_work(nvme_wq, &ctrl->connect_work, 0)) {
3565 dev_err(ctrl->ctrl.device,
3566 "NVME-FC{%d}: failed to schedule initial connect\n",
3571 flush_delayed_work(&ctrl->connect_work);
3573 dev_info(ctrl->ctrl.device,
3574 "NVME-FC{%d}: new ctrl: NQN \"%s\"\n",
3575 ctrl->cnum, ctrl->ctrl.opts->subsysnqn);
3580 nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_DELETING);
3581 cancel_work_sync(&ctrl->ioerr_work);
3582 cancel_work_sync(&ctrl->ctrl.reset_work);
3583 cancel_delayed_work_sync(&ctrl->connect_work);
3585 ctrl->ctrl.opts = NULL;
3587 /* initiate nvme ctrl ref counting teardown */
3588 nvme_uninit_ctrl(&ctrl->ctrl);
3590 /* Remove core ctrl ref. */
3591 nvme_put_ctrl(&ctrl->ctrl);
3593 /* as we're past the point where we transition to the ref
3594 * counting teardown path, if we return a bad pointer here,
3595 * the calling routine, thinking it's prior to the
3596 * transition, will do an rport put. Since the teardown
3597 * path also does a rport put, we do an extra get here so
3598 * that proper order/teardown happens.
3600 nvme_fc_rport_get(rport);
3602 return ERR_PTR(-EIO);
3604 out_cleanup_admin_q:
3605 blk_cleanup_queue(ctrl->ctrl.admin_q);
3606 out_cleanup_fabrics_q:
3607 blk_cleanup_queue(ctrl->ctrl.fabrics_q);
3608 out_free_admin_tag_set:
3609 blk_mq_free_tag_set(&ctrl->admin_tag_set);
3611 kfree(ctrl->queues);
3613 put_device(ctrl->dev);
3614 ida_simple_remove(&nvme_fc_ctrl_cnt, ctrl->cnum);
3618 /* exit via here doesn't follow ctrl ref points */
3619 return ERR_PTR(ret);
3623 struct nvmet_fc_traddr {
3629 __nvme_fc_parse_u64(substring_t *sstr, u64 *val)
3633 if (match_u64(sstr, &token64))
3641 * This routine validates and extracts the WWNs from the TRADDR string.
3642 * As kernel parsers need the 0x to determine number base, universally
3643 * build string to parse with 0x prefix before parsing name strings.
3646 nvme_fc_parse_traddr(struct nvmet_fc_traddr *traddr, char *buf, size_t blen)
3648 char name[2 + NVME_FC_TRADDR_HEXNAMELEN + 1];
3649 substring_t wwn = { name, &name[sizeof(name)-1] };
3650 int nnoffset, pnoffset;
3652 /* validate if string is one of the 2 allowed formats */
3653 if (strnlen(buf, blen) == NVME_FC_TRADDR_MAXLENGTH &&
3654 !strncmp(buf, "nn-0x", NVME_FC_TRADDR_OXNNLEN) &&
3655 !strncmp(&buf[NVME_FC_TRADDR_MAX_PN_OFFSET],
3656 "pn-0x", NVME_FC_TRADDR_OXNNLEN)) {
3657 nnoffset = NVME_FC_TRADDR_OXNNLEN;
3658 pnoffset = NVME_FC_TRADDR_MAX_PN_OFFSET +
3659 NVME_FC_TRADDR_OXNNLEN;
3660 } else if ((strnlen(buf, blen) == NVME_FC_TRADDR_MINLENGTH &&
3661 !strncmp(buf, "nn-", NVME_FC_TRADDR_NNLEN) &&
3662 !strncmp(&buf[NVME_FC_TRADDR_MIN_PN_OFFSET],
3663 "pn-", NVME_FC_TRADDR_NNLEN))) {
3664 nnoffset = NVME_FC_TRADDR_NNLEN;
3665 pnoffset = NVME_FC_TRADDR_MIN_PN_OFFSET + NVME_FC_TRADDR_NNLEN;
3671 name[2 + NVME_FC_TRADDR_HEXNAMELEN] = 0;
3673 memcpy(&name[2], &buf[nnoffset], NVME_FC_TRADDR_HEXNAMELEN);
3674 if (__nvme_fc_parse_u64(&wwn, &traddr->nn))
3677 memcpy(&name[2], &buf[pnoffset], NVME_FC_TRADDR_HEXNAMELEN);
3678 if (__nvme_fc_parse_u64(&wwn, &traddr->pn))
3684 pr_warn("%s: bad traddr string\n", __func__);
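/*
 * Illustrative usage with hypothetical WWNs - the two traddr forms
 * accepted above, as they would appear in opts->traddr or
 * opts->host_traddr:
 *
 *	"nn-0x20000090fa945aaa:pn-0x10000090fa945aaa"	(0x-prefixed)
 *	"nn-20000090fa945aaa:pn-10000090fa945aaa"	(minimal)
 *
 * Both parse to traddr->nn = 0x20000090fa945aaa and
 * traddr->pn = 0x10000090fa945aaa.
 */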
3688 static struct nvme_ctrl *
3689 nvme_fc_create_ctrl(struct device *dev, struct nvmf_ctrl_options *opts)
3691 struct nvme_fc_lport *lport;
3692 struct nvme_fc_rport *rport;
3693 struct nvme_ctrl *ctrl;
3694 struct nvmet_fc_traddr laddr = { 0L, 0L };
3695 struct nvmet_fc_traddr raddr = { 0L, 0L };
3696 unsigned long flags;
3699 ret = nvme_fc_parse_traddr(&raddr, opts->traddr, NVMF_TRADDR_SIZE);
3700 if (ret || !raddr.nn || !raddr.pn)
3701 return ERR_PTR(-EINVAL);
3703 ret = nvme_fc_parse_traddr(&laddr, opts->host_traddr, NVMF_TRADDR_SIZE);
3704 if (ret || !laddr.nn || !laddr.pn)
3705 return ERR_PTR(-EINVAL);
3707 /* find the host and remote ports to connect together */
3708 spin_lock_irqsave(&nvme_fc_lock, flags);
3709 list_for_each_entry(lport, &nvme_fc_lport_list, port_list) {
3710 if (lport->localport.node_name != laddr.nn ||
3711 lport->localport.port_name != laddr.pn ||
3712 lport->localport.port_state != FC_OBJSTATE_ONLINE)
3715 list_for_each_entry(rport, &lport->endp_list, endp_list) {
3716 if (rport->remoteport.node_name != raddr.nn ||
3717 rport->remoteport.port_name != raddr.pn ||
3718 rport->remoteport.port_state != FC_OBJSTATE_ONLINE)
3721 /* if we fail to get a reference, fall through. Will error. */
3722 if (!nvme_fc_rport_get(rport))
3725 spin_unlock_irqrestore(&nvme_fc_lock, flags);
3727 ctrl = nvme_fc_init_ctrl(dev, opts, lport, rport);
3729 nvme_fc_rport_put(rport);
3733 spin_unlock_irqrestore(&nvme_fc_lock, flags);
3735 pr_warn("%s: %s - %s combination not found\n",
3736 __func__, opts->traddr, opts->host_traddr);
3737 return ERR_PTR(-ENOENT);
3741 static struct nvmf_transport_ops nvme_fc_transport = {
3743 .module = THIS_MODULE,
3744 .required_opts = NVMF_OPT_TRADDR | NVMF_OPT_HOST_TRADDR,
3745 .allowed_opts = NVMF_OPT_RECONNECT_DELAY | NVMF_OPT_CTRL_LOSS_TMO,
3746 .create_ctrl = nvme_fc_create_ctrl,
3749 /* Arbitrary cap on successive failures. With lots of subsystems this could be high. */
3750 #define DISCOVERY_MAX_FAIL 20
3752 static ssize_t nvme_fc_nvme_discovery_store(struct device *dev,
3753 struct device_attribute *attr, const char *buf, size_t count)
3755 unsigned long flags;
3756 LIST_HEAD(local_disc_list);
3757 struct nvme_fc_lport *lport;
3758 struct nvme_fc_rport *rport;
3761 spin_lock_irqsave(&nvme_fc_lock, flags);
3763 list_for_each_entry(lport, &nvme_fc_lport_list, port_list) {
3764 list_for_each_entry(rport, &lport->endp_list, endp_list) {
3765 if (!nvme_fc_lport_get(lport))
3767 if (!nvme_fc_rport_get(rport)) {
3769 * This is a temporary condition. Upon restart
3770 * this rport will be gone from the list.
3772 * Revert the lport get and retry. Anything
3773 * added to the list already will be skipped (as
3774 * they are no longer list_empty). Loops should
3775 * resume at rports that were not yet seen.
3777 nvme_fc_lport_put(lport);
3779 if (failcnt++ < DISCOVERY_MAX_FAIL)
3782 pr_err("nvme_discovery: too many reference "
3784 goto process_local_list;
3786 if (list_empty(&rport->disc_list))
3787 list_add_tail(&rport->disc_list,
3793 while (!list_empty(&local_disc_list)) {
3794 rport = list_first_entry(&local_disc_list,
3795 struct nvme_fc_rport, disc_list);
3796 list_del_init(&rport->disc_list);
3797 spin_unlock_irqrestore(&nvme_fc_lock, flags);
3799 lport = rport->lport;
3800 /* signal discovery. Won't hurt if it repeats */
3801 nvme_fc_signal_discovery_scan(lport, rport);
3802 nvme_fc_rport_put(rport);
3803 nvme_fc_lport_put(lport);
3805 spin_lock_irqsave(&nvme_fc_lock, flags);
3807 spin_unlock_irqrestore(&nvme_fc_lock, flags);
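/*
 * Illustrative usage (sketch): writing to the attribute re-signals
 * discovery for every lport/rport pair currently known, e.g.:
 *
 *	echo 1 > /sys/class/fc/fc_udev_device/nvme_discovery
 *
 * The node name assumes the fc_udev_device created by
 * nvme_fc_init_module() below.
 */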
3812 /* Parse the cgroup id from a buf and return the length of cgrpid */
3813 static int fc_parse_cgrpid(const char *buf, u64 *id)
3818 memset(cgrp_id, 0x0, sizeof(cgrp_id));
3819 for (cgrpid_len = 0, j = 0; cgrpid_len < 17; cgrpid_len++) {
3820 if (buf[cgrpid_len] != ':')
3821 cgrp_id[cgrpid_len] = buf[cgrpid_len];
3829 if (kstrtou64(cgrp_id, 16, id) < 0)
3835 * fc_update_appid: Parse and update the appid in the blkcg associated with the cgroupid.
3837 * @buf: buf contains both cgrpid and appid info
3838 * @count: size of the buffer
3840 static int fc_update_appid(const char *buf, size_t count)
3845 char app_id[FC_APPID_LEN];
3848 if (buf[count-1] == '\n')
3851 if ((count > (16+1+FC_APPID_LEN)) || (!strchr(buf, ':')))
3854 cgrpid_len = fc_parse_cgrpid(buf, &cgrp_id);
3857 appid_len = count - cgrpid_len - 1;
3858 if (appid_len > FC_APPID_LEN)
3861 memset(app_id, 0x0, sizeof(app_id));
3862 memcpy(app_id, &buf[cgrpid_len+1], appid_len);
3863 ret = blkcg_set_fc_appid(app_id, cgrp_id, sizeof(app_id));
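/*
 * Illustrative usage (sketch, hypothetical values): the written buffer
 * is "<16 hex-digit cgroup id>:<application id>", e.g.:
 *
 *	echo "df000000000000fa:SPARK_APP_01" > \
 *		/sys/class/fc/fc_udev_device/appid_store
 *
 * fc_parse_cgrpid() consumes the hex digits up to the ':'; the
 * remainder (at most FC_APPID_LEN bytes) becomes the application id
 * passed to blkcg_set_fc_appid().
 */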
3869 static ssize_t fc_appid_store(struct device *dev,
3870 struct device_attribute *attr, const char *buf, size_t count)
3874 ret = fc_update_appid(buf, count);
3879 static DEVICE_ATTR(nvme_discovery, 0200, NULL, nvme_fc_nvme_discovery_store);
3880 static DEVICE_ATTR(appid_store, 0200, NULL, fc_appid_store);
3882 static struct attribute *nvme_fc_attrs[] = {
3883 &dev_attr_nvme_discovery.attr,
3884 &dev_attr_appid_store.attr,
3888 static const struct attribute_group nvme_fc_attr_group = {
3889 .attrs = nvme_fc_attrs,
3892 static const struct attribute_group *nvme_fc_attr_groups[] = {
3893 &nvme_fc_attr_group,
3897 static struct class fc_class = {
3899 .dev_groups = nvme_fc_attr_groups,
3900 .owner = THIS_MODULE,
3903 static int __init nvme_fc_init_module(void)
3907 nvme_fc_wq = alloc_workqueue("nvme_fc_wq", WQ_MEM_RECLAIM, 0);
3913 * It is expected that in the future the kernel will combine
3914 * the FC-isms that are currently under scsi and now being
3915 * added to by NVME into a new standalone FC class. The SCSI
3916 * and NVME protocols and their devices would be under this
3919 * As we need something to post FC-specific udev events to,
3920 * specifically for nvme probe events, start by creating the
3921 * new device class. When the new standalone FC class is
3922 * put in place, this code will move to a more generic
3923 * location for the class.
3925 ret = class_register(&fc_class);
3927 pr_err("couldn't register class fc\n");
3928 goto out_destroy_wq;
3932 * Create a device for the FC-centric udev events
3934 fc_udev_device = device_create(&fc_class, NULL, MKDEV(0, 0), NULL,
3936 if (IS_ERR(fc_udev_device)) {
3937 pr_err("couldn't create fc_udev device!\n");
3938 ret = PTR_ERR(fc_udev_device);
3939 goto out_destroy_class;
3942 ret = nvmf_register_transport(&nvme_fc_transport);
3944 goto out_destroy_device;
3949 device_destroy(&fc_class, MKDEV(0, 0));
3951 class_unregister(&fc_class);
3953 destroy_workqueue(nvme_fc_wq);
3959 nvme_fc_delete_controllers(struct nvme_fc_rport *rport)
3961 struct nvme_fc_ctrl *ctrl;
3963 spin_lock(&rport->lock);
3964 list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list) {
3965 dev_warn(ctrl->ctrl.device,
3966 "NVME-FC{%d}: transport unloading: deleting ctrl\n",
3968 nvme_delete_ctrl(&ctrl->ctrl);
3970 spin_unlock(&rport->lock);
3974 nvme_fc_cleanup_for_unload(void)
3976 struct nvme_fc_lport *lport;
3977 struct nvme_fc_rport *rport;
3979 list_for_each_entry(lport, &nvme_fc_lport_list, port_list) {
3980 list_for_each_entry(rport, &lport->endp_list, endp_list) {
3981 nvme_fc_delete_controllers(rport);
3986 static void __exit nvme_fc_exit_module(void)
3988 unsigned long flags;
3989 bool need_cleanup = false;
3991 spin_lock_irqsave(&nvme_fc_lock, flags);
3992 nvme_fc_waiting_to_unload = true;
3993 if (!list_empty(&nvme_fc_lport_list)) {
3994 need_cleanup = true;
3995 nvme_fc_cleanup_for_unload();
3997 spin_unlock_irqrestore(&nvme_fc_lock, flags);
3999 pr_info("%s: waiting for ctlr deletes\n", __func__);
4000 wait_for_completion(&nvme_fc_unload_proceed);
4001 pr_info("%s: ctrl deletes complete\n", __func__);
4004 nvmf_unregister_transport(&nvme_fc_transport);
4006 ida_destroy(&nvme_fc_local_port_cnt);
4007 ida_destroy(&nvme_fc_ctrl_cnt);
4009 device_destroy(&fc_class, MKDEV(0, 0));
4010 class_unregister(&fc_class);
4011 destroy_workqueue(nvme_fc_wq);
4014 module_init(nvme_fc_init_module);
4015 module_exit(nvme_fc_exit_module);
4017 MODULE_LICENSE("GPL v2");