/*
 * Common code for the NVMe target.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/random.h>
#include "nvmet.h"
static struct nvmet_fabrics_ops *nvmet_transports[NVMF_TRTYPE_MAX];
/*
 * This read/write semaphore is used to synchronize access to configuration
 * information on a target system that will result in discovery log page
 * information change for at least one host.
 * The full list of resources to be protected by this semaphore is:
 *
 *  - subsystems list
 *  - per-subsystem allowed hosts list
 *  - allow_any_host subsystem attribute
 *  - nvmet_genctr
 *  - the nvmet_transports array
 *
 * When updating any of those lists/structures the write lock should be
 * obtained, while when reading (populating the discovery log page or checking
 * host-subsystem link) the read lock is obtained to allow concurrent reads.
 */
DECLARE_RWSEM(nvmet_config_sem);
static struct nvmet_subsys *nvmet_find_get_subsys(struct nvmet_port *port,
		const char *subsysnqn);
u16 nvmet_copy_to_sgl(struct nvmet_req *req, off_t off, const void *buf,
		size_t len)
{
	if (sg_pcopy_from_buffer(req->sg, req->sg_cnt, buf, len, off) != len)
		return NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR;
	return 0;
}
u16 nvmet_copy_from_sgl(struct nvmet_req *req, off_t off, void *buf, size_t len)
{
	if (sg_pcopy_to_buffer(req->sg, req->sg_cnt, buf, len, off) != len)
		return NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR;
	return 0;
}
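
/*
 * An Asynchronous Event Request completes with its payload in Completion
 * Queue Entry Dword 0: the event type in bits 2:0, event information in
 * bits 15:8 and the associated log page identifier in bits 23:16.
 */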
static u32 nvmet_async_event_result(struct nvmet_async_event *aen)
{
	return aen->event_type | (aen->event_info << 8) | (aen->log_page << 16);
}
static void nvmet_async_events_free(struct nvmet_ctrl *ctrl)
{
	struct nvmet_req *req;

	while (1) {
		mutex_lock(&ctrl->lock);
		if (!ctrl->nr_async_event_cmds) {
			mutex_unlock(&ctrl->lock);
			return;
		}

		req = ctrl->async_event_cmds[--ctrl->nr_async_event_cmds];
		mutex_unlock(&ctrl->lock);
		nvmet_req_complete(req, NVME_SC_INTERNAL | NVME_SC_DNR);
	}
}
static void nvmet_async_event_work(struct work_struct *work)
{
	struct nvmet_ctrl *ctrl =
		container_of(work, struct nvmet_ctrl, async_event_work);
	struct nvmet_async_event *aen;
	struct nvmet_req *req;

	while (1) {
		mutex_lock(&ctrl->lock);
		aen = list_first_entry_or_null(&ctrl->async_events,
				struct nvmet_async_event, entry);
		if (!aen || !ctrl->nr_async_event_cmds) {
			mutex_unlock(&ctrl->lock);
			return;
		}

		req = ctrl->async_event_cmds[--ctrl->nr_async_event_cmds];
		nvmet_set_result(req, nvmet_async_event_result(aen));

		list_del(&aen->entry);
		kfree(aen);

		mutex_unlock(&ctrl->lock);
		nvmet_req_complete(req, 0);
	}
}
static void nvmet_add_async_event(struct nvmet_ctrl *ctrl, u8 event_type,
		u8 event_info, u8 log_page)
{
	struct nvmet_async_event *aen;

	aen = kmalloc(sizeof(*aen), GFP_KERNEL);
	if (!aen)
		return;

	aen->event_type = event_type;
	aen->event_info = event_info;
	aen->log_page = log_page;

	mutex_lock(&ctrl->lock);
	list_add_tail(&aen->entry, &ctrl->async_events);
	mutex_unlock(&ctrl->lock);

	schedule_work(&ctrl->async_event_work);
}
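
/*
 * Transport drivers register a nvmet_fabrics_ops instance for their
 * NVMF_TRTYPE_* value so that ports of that type can be enabled.  A rough
 * sketch (illustrative only; the names below are hypothetical and only the
 * ops used in this file are shown):
 *
 *	static struct nvmet_fabrics_ops my_ops = {
 *		.owner		= THIS_MODULE,
 *		.type		= NVMF_TRTYPE_LOOP,
 *		.add_port	= my_add_port,
 *		.remove_port	= my_remove_port,
 *		.queue_response	= my_queue_response,
 *		.delete_ctrl	= my_delete_ctrl,
 *	};
 *	...
 *	ret = nvmet_register_transport(&my_ops);
 *
 * Only one transport may be registered per type at a time.
 */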
int nvmet_register_transport(struct nvmet_fabrics_ops *ops)
{
	int ret = 0;

	down_write(&nvmet_config_sem);
	if (nvmet_transports[ops->type])
		ret = -EINVAL;
	else
		nvmet_transports[ops->type] = ops;
	up_write(&nvmet_config_sem);

	return ret;
}
EXPORT_SYMBOL_GPL(nvmet_register_transport);
void nvmet_unregister_transport(struct nvmet_fabrics_ops *ops)
{
	down_write(&nvmet_config_sem);
	nvmet_transports[ops->type] = NULL;
	up_write(&nvmet_config_sem);
}
EXPORT_SYMBOL_GPL(nvmet_unregister_transport);
int nvmet_enable_port(struct nvmet_port *port)
{
	struct nvmet_fabrics_ops *ops;
	int ret;

	lockdep_assert_held(&nvmet_config_sem);

	ops = nvmet_transports[port->disc_addr.trtype];
	if (!ops) {
		up_write(&nvmet_config_sem);
		request_module("nvmet-transport-%d", port->disc_addr.trtype);
		down_write(&nvmet_config_sem);
		ops = nvmet_transports[port->disc_addr.trtype];
		if (!ops) {
			pr_err("transport type %d not supported\n",
				port->disc_addr.trtype);
			return -EINVAL;
		}
	}

	if (!try_module_get(ops->owner))
		return -EINVAL;

	ret = ops->add_port(port);
	if (ret) {
		module_put(ops->owner);
		return ret;
	}

	port->enabled = true;
	return 0;
}
void nvmet_disable_port(struct nvmet_port *port)
{
	struct nvmet_fabrics_ops *ops;

	lockdep_assert_held(&nvmet_config_sem);

	port->enabled = false;

	ops = nvmet_transports[port->disc_addr.trtype];
	ops->remove_port(port);
	module_put(ops->owner);
}
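
/*
 * Fabrics hosts are required to send a Keep Alive command within the
 * negotiated KATO interval; if the delayed work below ever runs, the host
 * is presumed gone and the transport is asked to delete the controller.
 */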
static void nvmet_keep_alive_timer(struct work_struct *work)
{
	struct nvmet_ctrl *ctrl = container_of(to_delayed_work(work),
			struct nvmet_ctrl, ka_work);

	pr_err("ctrl %d keep-alive timer (%d seconds) expired!\n",
		ctrl->cntlid, ctrl->kato);

	ctrl->ops->delete_ctrl(ctrl);
}
static void nvmet_start_keep_alive_timer(struct nvmet_ctrl *ctrl)
{
	pr_debug("ctrl %d start keep-alive timer for %d secs\n",
		ctrl->cntlid, ctrl->kato);

	INIT_DELAYED_WORK(&ctrl->ka_work, nvmet_keep_alive_timer);
	schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
}
static void nvmet_stop_keep_alive_timer(struct nvmet_ctrl *ctrl)
{
	pr_debug("ctrl %d stop keep-alive\n", ctrl->cntlid);

	cancel_delayed_work_sync(&ctrl->ka_work);
}
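
/*
 * Namespace lookup walks the subsystem's RCU-protected namespaces list;
 * callers must hold rcu_read_lock() (or the subsystem mutex for writers).
 */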
static struct nvmet_ns *__nvmet_find_namespace(struct nvmet_ctrl *ctrl,
		__le32 nsid)
{
	struct nvmet_ns *ns;

	list_for_each_entry_rcu(ns, &ctrl->subsys->namespaces, dev_link) {
		if (ns->nsid == le32_to_cpu(nsid))
			return ns;
	}

	return NULL;
}
struct nvmet_ns *nvmet_find_namespace(struct nvmet_ctrl *ctrl, __le32 nsid)
{
	struct nvmet_ns *ns;

	rcu_read_lock();
	ns = __nvmet_find_namespace(ctrl, nsid);
	if (ns)
		percpu_ref_get(&ns->ref);
	rcu_read_unlock();

	return ns;
}
static void nvmet_destroy_namespace(struct percpu_ref *ref)
{
	struct nvmet_ns *ns = container_of(ref, struct nvmet_ns, ref);

	complete(&ns->disable_done);
}
void nvmet_put_namespace(struct nvmet_ns *ns)
{
	percpu_ref_put(&ns->ref);
}
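
/*
 * Make a configured namespace visible to hosts: open the backing block
 * device, insert the namespace into the subsystem's sorted list, and send
 * an AEN to every controller connected to the subsystem.
 */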
int nvmet_ns_enable(struct nvmet_ns *ns)
{
	struct nvmet_subsys *subsys = ns->subsys;
	struct nvmet_ctrl *ctrl;
	int ret = 0;

	mutex_lock(&subsys->lock);
	if (!list_empty(&ns->dev_link))
		goto out_unlock;

	ns->bdev = blkdev_get_by_path(ns->device_path, FMODE_READ | FMODE_WRITE,
			NULL);
	if (IS_ERR(ns->bdev)) {
		pr_err("nvmet: failed to open block device %s: (%ld)\n",
			ns->device_path, PTR_ERR(ns->bdev));
		ret = PTR_ERR(ns->bdev);
		ns->bdev = NULL;
		goto out_unlock;
	}

	ns->size = i_size_read(ns->bdev->bd_inode);
	ns->blksize_shift = blksize_bits(bdev_logical_block_size(ns->bdev));

	ret = percpu_ref_init(&ns->ref, nvmet_destroy_namespace,
				0, GFP_KERNEL);
	if (ret)
		goto out_blkdev_put;

	if (ns->nsid > subsys->max_nsid)
		subsys->max_nsid = ns->nsid;

	/*
	 * The namespaces list needs to be sorted to simplify the
	 * implementation of the Identify Namespace List subcommand.
	 */
	if (list_empty(&subsys->namespaces)) {
		list_add_tail_rcu(&ns->dev_link, &subsys->namespaces);
	} else {
		struct nvmet_ns *old;

		list_for_each_entry_rcu(old, &subsys->namespaces, dev_link) {
			BUG_ON(ns->nsid == old->nsid);
			if (ns->nsid < old->nsid)
				break;
		}

		list_add_tail_rcu(&ns->dev_link, &old->dev_link);
	}

	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
		nvmet_add_async_event(ctrl, NVME_AER_TYPE_NOTICE, 0, 0);

	ret = 0;
out_unlock:
	mutex_unlock(&subsys->lock);
	return ret;
out_blkdev_put:
	blkdev_put(ns->bdev, FMODE_WRITE | FMODE_READ);
	ns->bdev = NULL;
	goto out_unlock;
}
void nvmet_ns_disable(struct nvmet_ns *ns)
{
	struct nvmet_subsys *subsys = ns->subsys;
	struct nvmet_ctrl *ctrl;

	mutex_lock(&subsys->lock);
	if (list_empty(&ns->dev_link)) {
		mutex_unlock(&subsys->lock);
		return;
	}
	list_del_init(&ns->dev_link);
	mutex_unlock(&subsys->lock);

	/*
	 * Now that we removed the namespace from the lookup list, we
	 * can kill the percpu ref and wait for any remaining references
	 * to be dropped, as well as an RCU grace period for anyone only
	 * using the namespace under rcu_read_lock().  Note that we can't
	 * use call_rcu here as we need to ensure the namespaces have
	 * been fully destroyed before unloading the module.
	 */
	percpu_ref_kill(&ns->ref);
	synchronize_rcu();
	wait_for_completion(&ns->disable_done);
	percpu_ref_exit(&ns->ref);

	mutex_lock(&subsys->lock);
	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
		nvmet_add_async_event(ctrl, NVME_AER_TYPE_NOTICE, 0, 0);

	if (ns->bdev)
		blkdev_put(ns->bdev, FMODE_WRITE | FMODE_READ);
	mutex_unlock(&subsys->lock);
}
void nvmet_ns_free(struct nvmet_ns *ns)
{
	nvmet_ns_disable(ns);

	kfree(ns->device_path);
	kfree(ns);
}
struct nvmet_ns *nvmet_ns_alloc(struct nvmet_subsys *subsys, u32 nsid)
{
	struct nvmet_ns *ns;

	ns = kzalloc(sizeof(*ns), GFP_KERNEL);
	if (!ns)
		return NULL;

	INIT_LIST_HEAD(&ns->dev_link);
	init_completion(&ns->disable_done);

	ns->nsid = nsid;
	ns->subsys = subsys;

	return ns;
}
static void __nvmet_req_complete(struct nvmet_req *req, u16 status)
{
	if (status)
		nvmet_set_status(req, status);

	/* XXX: need to fill in something useful for sq_head */
	req->rsp->sq_head = 0;
	if (likely(req->sq)) /* may happen during early failure */
		req->rsp->sq_id = cpu_to_le16(req->sq->qid);
	req->rsp->command_id = req->cmd->common.command_id;

	if (req->ns)
		nvmet_put_namespace(req->ns);
	req->ops->queue_response(req);
}
void nvmet_req_complete(struct nvmet_req *req, u16 status)
{
	__nvmet_req_complete(req, status);
	percpu_ref_put(&req->sq->ref);
}
EXPORT_SYMBOL_GPL(nvmet_req_complete);
void nvmet_cq_setup(struct nvmet_ctrl *ctrl, struct nvmet_cq *cq,
		u16 qid, u16 size)
{
	cq->qid = qid;
	cq->size = size;

	ctrl->cqs[qid] = cq;
}
void nvmet_sq_setup(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq,
		u16 qid, u16 size)
{
	sq->qid = qid;
	sq->size = size;

	ctrl->sqs[qid] = sq;
}
void nvmet_sq_destroy(struct nvmet_sq *sq)
{
	/*
	 * If this is the admin queue, complete all AERs so that our
	 * queue doesn't have outstanding requests on it.
	 */
	if (sq->ctrl && sq->ctrl->sqs && sq->ctrl->sqs[0] == sq)
		nvmet_async_events_free(sq->ctrl);
	percpu_ref_kill(&sq->ref);
	wait_for_completion(&sq->free_done);
	percpu_ref_exit(&sq->ref);

	if (sq->ctrl) {
		nvmet_ctrl_put(sq->ctrl);
		sq->ctrl = NULL; /* allows reusing the queue later */
	}
}
EXPORT_SYMBOL_GPL(nvmet_sq_destroy);
static void nvmet_sq_free(struct percpu_ref *ref)
{
	struct nvmet_sq *sq = container_of(ref, struct nvmet_sq, ref);

	complete(&sq->free_done);
}
int nvmet_sq_init(struct nvmet_sq *sq)
{
	int ret;

	ret = percpu_ref_init(&sq->ref, nvmet_sq_free, 0, GFP_KERNEL);
	if (ret) {
		pr_err("percpu_ref init failed!\n");
		return ret;
	}
	init_completion(&sq->free_done);

	return 0;
}
EXPORT_SYMBOL_GPL(nvmet_sq_init);
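
/*
 * Called by the transport for every received command capsule.  On success
 * the request is ready for execution and true is returned; on any failure
 * the request is completed immediately with a suitable status and false is
 * returned.
 */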
bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
		struct nvmet_sq *sq, struct nvmet_fabrics_ops *ops)
{
	u8 flags = req->cmd->common.flags;
	u16 status;

	req->cq = cq;
	req->sq = sq;
	req->ops = ops;
	req->sg = NULL;
	req->sg_cnt = 0;
	req->rsp->status = 0;

	/* no support for fused commands yet */
	if (unlikely(flags & (NVME_CMD_FUSE_FIRST | NVME_CMD_FUSE_SECOND))) {
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		goto fail;
	}

	/* either variant of SGLs is fine, as we don't support metadata */
	if (unlikely((flags & NVME_CMD_SGL_ALL) != NVME_CMD_SGL_METABUF &&
		     (flags & NVME_CMD_SGL_ALL) != NVME_CMD_SGL_METASEG)) {
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		goto fail;
	}

	if (unlikely(!req->sq->ctrl))
		/* will return an error for any Non-connect command: */
		status = nvmet_parse_connect_cmd(req);
	else if (likely(req->sq->qid != 0))
		status = nvmet_parse_io_cmd(req);
	else if (req->cmd->common.opcode == nvme_fabrics_command)
		status = nvmet_parse_fabrics_cmd(req);
	else if (req->sq->ctrl->subsys->type == NVME_NQN_DISC)
		status = nvmet_parse_discovery_cmd(req);
	else
		status = nvmet_parse_admin_cmd(req);

	if (status)
		goto fail;

	if (unlikely(!percpu_ref_tryget_live(&sq->ref))) {
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		goto fail;
	}

	return true;

fail:
	__nvmet_req_complete(req, status);
	return false;
}
EXPORT_SYMBOL_GPL(nvmet_req_init);
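
/*
 * Field extraction helpers for the Controller Configuration (CC) register:
 * EN is bit 0, CSS bits 6:4, MPS bits 10:7, AMS bits 13:11, SHN bits 15:14,
 * IOSQES bits 19:16 and IOCQES bits 23:20.
 */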
static inline bool nvmet_cc_en(u32 cc)
{
	return cc & 0x1;
}

static inline u8 nvmet_cc_css(u32 cc)
{
	return (cc >> 4) & 0x7;
}

static inline u8 nvmet_cc_mps(u32 cc)
{
	return (cc >> 7) & 0xf;
}

static inline u8 nvmet_cc_ams(u32 cc)
{
	return (cc >> 11) & 0x7;
}

static inline u8 nvmet_cc_shn(u32 cc)
{
	return (cc >> 14) & 0x3;
}

static inline u8 nvmet_cc_iosqes(u32 cc)
{
	return (cc >> 16) & 0xf;
}

static inline u8 nvmet_cc_iocqes(u32 cc)
{
	return (cc >> 20) & 0xf;
}
static void nvmet_start_ctrl(struct nvmet_ctrl *ctrl)
{
	lockdep_assert_held(&ctrl->lock);

	if (nvmet_cc_iosqes(ctrl->cc) != NVME_NVM_IOSQES ||
	    nvmet_cc_iocqes(ctrl->cc) != NVME_NVM_IOCQES ||
	    nvmet_cc_mps(ctrl->cc) != 0 ||
	    nvmet_cc_ams(ctrl->cc) != 0 ||
	    nvmet_cc_css(ctrl->cc) != 0) {
		ctrl->csts = NVME_CSTS_CFS;
		return;
	}

	ctrl->csts = NVME_CSTS_RDY;
}
static void nvmet_clear_ctrl(struct nvmet_ctrl *ctrl)
{
	lockdep_assert_held(&ctrl->lock);

	/* XXX: tear down queues? */
	ctrl->csts &= ~NVME_CSTS_RDY;
	ctrl->cc = 0;
}
void nvmet_update_cc(struct nvmet_ctrl *ctrl, u32 new)
{
	u32 old;

	mutex_lock(&ctrl->lock);
	old = ctrl->cc;
	ctrl->cc = new;

	if (nvmet_cc_en(new) && !nvmet_cc_en(old))
		nvmet_start_ctrl(ctrl);
	if (!nvmet_cc_en(new) && nvmet_cc_en(old))
		nvmet_clear_ctrl(ctrl);
	if (nvmet_cc_shn(new) && !nvmet_cc_shn(old)) {
		nvmet_clear_ctrl(ctrl);
		ctrl->csts |= NVME_CSTS_SHST_CMPLT;
	}
	if (!nvmet_cc_shn(new) && nvmet_cc_shn(old))
		ctrl->csts &= ~NVME_CSTS_SHST_CMPLT;
	mutex_unlock(&ctrl->lock);
}
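
/*
 * Advertised Controller Capabilities (CAP): CSS bit 37 selects the NVM
 * command set, TO (bits 31:24) is the worst-case CC.EN transition time in
 * 500ms units, and MQES (bits 15:0) is the zero's based maximum queue size.
 */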
static void nvmet_init_cap(struct nvmet_ctrl *ctrl)
{
	/* command sets supported: NVMe command set: */
	ctrl->cap = (1ULL << 37);
	/* CC.EN timeout in 500msec units: */
	ctrl->cap |= (15ULL << 24);
	/* maximum queue entries supported: */
	ctrl->cap |= NVMET_QUEUE_SIZE - 1;
}
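
/*
 * Look up an existing controller by (subsysnqn, cntlid) on behalf of an
 * I/O queue Connect command, verify that the connecting host is the one
 * that created the controller, and take a reference on it.
 */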
u16 nvmet_ctrl_find_get(const char *subsysnqn, const char *hostnqn, u16 cntlid,
		struct nvmet_req *req, struct nvmet_ctrl **ret)
{
	struct nvmet_subsys *subsys;
	struct nvmet_ctrl *ctrl;
	u16 status = 0;

	subsys = nvmet_find_get_subsys(req->port, subsysnqn);
	if (!subsys) {
		pr_warn("connect request for invalid subsystem %s!\n",
			subsysnqn);
		req->rsp->result = IPO_IATTR_CONNECT_DATA(subsysnqn);
		return NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
	}

	mutex_lock(&subsys->lock);
	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) {
		if (ctrl->cntlid == cntlid) {
			if (strncmp(hostnqn, ctrl->hostnqn, NVMF_NQN_SIZE)) {
				pr_warn("hostnqn mismatch.\n");
				continue;
			}
			if (!kref_get_unless_zero(&ctrl->ref))
				continue;

			*ret = ctrl;
			goto out;
		}
	}

	pr_warn("could not find controller %d for subsys %s / host %s\n",
		cntlid, subsysnqn, hostnqn);
	req->rsp->result = IPO_IATTR_CONNECT_DATA(cntlid);
	status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;

out:
	mutex_unlock(&subsys->lock);
	nvmet_subsys_put(subsys);

	return status;
}
static bool __nvmet_host_allowed(struct nvmet_subsys *subsys,
		const char *hostnqn)
{
	struct nvmet_host_link *p;

	if (subsys->allow_any_host)
		return true;

	list_for_each_entry(p, &subsys->hosts, entry) {
		if (!strcmp(nvmet_host_name(p->host), hostnqn))
			return true;
	}

	return false;
}
static bool nvmet_host_discovery_allowed(struct nvmet_req *req,
		const char *hostnqn)
{
	struct nvmet_subsys_link *s;

	list_for_each_entry(s, &req->port->subsystems, entry) {
		if (__nvmet_host_allowed(s->subsys, hostnqn))
			return true;
	}

	return false;
}
bool nvmet_host_allowed(struct nvmet_req *req, struct nvmet_subsys *subsys,
		const char *hostnqn)
{
	lockdep_assert_held(&nvmet_config_sem);

	if (subsys->type == NVME_NQN_DISC)
		return nvmet_host_discovery_allowed(req, hostnqn);
	else
		return __nvmet_host_allowed(subsys, hostnqn);
}
u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
		struct nvmet_req *req, u32 kato, struct nvmet_ctrl **ctrlp)
{
	struct nvmet_subsys *subsys;
	struct nvmet_ctrl *ctrl;
	int ret;
	u16 status;

	status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
	subsys = nvmet_find_get_subsys(req->port, subsysnqn);
	if (!subsys) {
		pr_warn("connect request for invalid subsystem %s!\n",
			subsysnqn);
		req->rsp->result = IPO_IATTR_CONNECT_DATA(subsysnqn);
		goto out;
	}

	status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
	down_read(&nvmet_config_sem);
	if (!nvmet_host_allowed(req, subsys, hostnqn)) {
		pr_info("connect by host %s for subsystem %s not allowed\n",
			hostnqn, subsysnqn);
		req->rsp->result = IPO_IATTR_CONNECT_DATA(hostnqn);
		up_read(&nvmet_config_sem);
		goto out_put_subsystem;
	}
	up_read(&nvmet_config_sem);

	status = NVME_SC_INTERNAL;
	ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
	if (!ctrl)
		goto out_put_subsystem;
	mutex_init(&ctrl->lock);

	nvmet_init_cap(ctrl);

	INIT_WORK(&ctrl->async_event_work, nvmet_async_event_work);
	INIT_LIST_HEAD(&ctrl->async_events);

	memcpy(ctrl->subsysnqn, subsysnqn, NVMF_NQN_SIZE);
	memcpy(ctrl->hostnqn, hostnqn, NVMF_NQN_SIZE);

	/* generate a random serial number as our controllers are ephemeral: */
	get_random_bytes(&ctrl->serial, sizeof(ctrl->serial));

	kref_init(&ctrl->ref);
	ctrl->subsys = subsys;

	ctrl->cqs = kcalloc(subsys->max_qid + 1,
			sizeof(struct nvmet_cq *),
			GFP_KERNEL);
	if (!ctrl->cqs)
		goto out_free_ctrl;

	ctrl->sqs = kcalloc(subsys->max_qid + 1,
			sizeof(struct nvmet_sq *),
			GFP_KERNEL);
	if (!ctrl->sqs)
		goto out_free_cqs;

	ret = ida_simple_get(&subsys->cntlid_ida,
			     NVME_CNTLID_MIN, NVME_CNTLID_MAX,
			     GFP_KERNEL);
	if (ret < 0) {
		status = NVME_SC_CONNECT_CTRL_BUSY | NVME_SC_DNR;
		goto out_free_sqs;
	}
	ctrl->cntlid = ret;

	ctrl->ops = req->ops;
	if (ctrl->subsys->type == NVME_NQN_DISC) {
		/* Don't accept keep-alive timeout for discovery controllers */
		if (kato) {
			status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
			goto out_free_sqs;
		}

		/*
		 * Discovery controllers use some arbitrary high value in
		 * order to clean up stale discovery sessions.
		 *
		 * From the latest base spec draft:
		 * "The Keep Alive command is not supported by
		 * Discovery controllers. A transport may specify a
		 * fixed Discovery controller activity timeout value
		 * (e.g., 2 minutes). If no commands are received
		 * by a Discovery controller within that time
		 * period, the controller may perform the
		 * actions for Keep Alive Timer expiration".
		 */
		ctrl->kato = NVMET_DISC_KATO;
	} else {
		/* keep-alive timeout in seconds */
		ctrl->kato = DIV_ROUND_UP(kato, 1000);
	}
	nvmet_start_keep_alive_timer(ctrl);

	mutex_lock(&subsys->lock);
	list_add_tail(&ctrl->subsys_entry, &subsys->ctrls);
	mutex_unlock(&subsys->lock);

	*ctrlp = ctrl;
	return 0;

out_free_sqs:
	kfree(ctrl->sqs);
out_free_cqs:
	kfree(ctrl->cqs);
out_free_ctrl:
	kfree(ctrl);
out_put_subsystem:
	nvmet_subsys_put(subsys);
out:
	return status;
}
static void nvmet_ctrl_free(struct kref *ref)
{
	struct nvmet_ctrl *ctrl = container_of(ref, struct nvmet_ctrl, ref);
	struct nvmet_subsys *subsys = ctrl->subsys;

	nvmet_stop_keep_alive_timer(ctrl);

	mutex_lock(&subsys->lock);
	list_del(&ctrl->subsys_entry);
	mutex_unlock(&subsys->lock);

	ida_simple_remove(&subsys->cntlid_ida, ctrl->cntlid);
	nvmet_subsys_put(subsys);

	kfree(ctrl->sqs);
	kfree(ctrl->cqs);
	kfree(ctrl);
}
void nvmet_ctrl_put(struct nvmet_ctrl *ctrl)
{
	kref_put(&ctrl->ref, nvmet_ctrl_free);
}
static void nvmet_fatal_error_handler(struct work_struct *work)
{
	struct nvmet_ctrl *ctrl =
			container_of(work, struct nvmet_ctrl, fatal_err_work);

	pr_err("ctrl %d fatal error occurred!\n", ctrl->cntlid);
	ctrl->ops->delete_ctrl(ctrl);
}
void nvmet_ctrl_fatal_error(struct nvmet_ctrl *ctrl)
{
	ctrl->csts |= NVME_CSTS_CFS;
	INIT_WORK(&ctrl->fatal_err_work, nvmet_fatal_error_handler);
	schedule_work(&ctrl->fatal_err_work);
}
EXPORT_SYMBOL_GPL(nvmet_ctrl_fatal_error);
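
/*
 * Resolve a subsystem NQN to a referenced nvmet_subsys.  The well-known
 * discovery NQN always resolves to the singleton discovery subsystem;
 * any other NQN must be linked to the port the connection arrived on.
 */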
static struct nvmet_subsys *nvmet_find_get_subsys(struct nvmet_port *port,
		const char *subsysnqn)
{
	struct nvmet_subsys_link *p;

	if (!port)
		return NULL;

	if (!strncmp(NVME_DISC_SUBSYS_NAME, subsysnqn,
			NVMF_NQN_SIZE)) {
		if (!kref_get_unless_zero(&nvmet_disc_subsys->ref))
			return NULL;
		return nvmet_disc_subsys;
	}

	down_read(&nvmet_config_sem);
	list_for_each_entry(p, &port->subsystems, entry) {
		if (!strncmp(p->subsys->subsysnqn, subsysnqn,
				NVMF_NQN_SIZE)) {
			if (!kref_get_unless_zero(&p->subsys->ref))
				break;
			up_read(&nvmet_config_sem);
			return p->subsys;
		}
	}
	up_read(&nvmet_config_sem);
	return NULL;
}
struct nvmet_subsys *nvmet_subsys_alloc(const char *subsysnqn,
		enum nvme_subsys_type type)
{
	struct nvmet_subsys *subsys;

	subsys = kzalloc(sizeof(*subsys), GFP_KERNEL);
	if (!subsys)
		return NULL;

	subsys->ver = (1 << 16) | (2 << 8) | 1; /* NVMe 1.2.1 */

	switch (type) {
	case NVME_NQN_NVME:
		subsys->max_qid = NVMET_NR_QUEUES;
		break;
	case NVME_NQN_DISC:
		subsys->max_qid = 0;
		break;
	default:
		pr_err("%s: Unknown Subsystem type - %d\n", __func__, type);
		kfree(subsys);
		return NULL;
	}
	subsys->type = type;
	subsys->subsysnqn = kstrndup(subsysnqn, NVMF_NQN_SIZE,
			GFP_KERNEL);
	if (!subsys->subsysnqn) {
		kfree(subsys);
		return NULL;
	}

	kref_init(&subsys->ref);

	mutex_init(&subsys->lock);
	INIT_LIST_HEAD(&subsys->namespaces);
	INIT_LIST_HEAD(&subsys->ctrls);

	ida_init(&subsys->cntlid_ida);

	INIT_LIST_HEAD(&subsys->hosts);

	return subsys;
}
static void nvmet_subsys_free(struct kref *ref)
{
	struct nvmet_subsys *subsys =
		container_of(ref, struct nvmet_subsys, ref);

	WARN_ON_ONCE(!list_empty(&subsys->namespaces));

	ida_destroy(&subsys->cntlid_ida);
	kfree(subsys->subsysnqn);
	kfree(subsys);
}

void nvmet_subsys_put(struct nvmet_subsys *subsys)
{
	kref_put(&subsys->ref, nvmet_subsys_free);
}
static int __init nvmet_init(void)
{
	int error;

	error = nvmet_init_discovery();
	if (error)
		goto out;

	error = nvmet_init_configfs();
	if (error)
		goto out_exit_discovery;
	return 0;

out_exit_discovery:
	nvmet_exit_discovery();
out:
	return error;
}
static void __exit nvmet_exit(void)
{
	nvmet_exit_configfs();
	nvmet_exit_discovery();

	BUILD_BUG_ON(sizeof(struct nvmf_disc_rsp_page_entry) != 1024);
	BUILD_BUG_ON(sizeof(struct nvmf_disc_rsp_page_hdr) != 1024);
}

module_init(nvmet_init);
module_exit(nvmet_exit);

MODULE_LICENSE("GPL v2");