// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2017-2018 Christoph Hellwig.
 */

#include <linux/backing-dev.h>
#include <linux/moduleparam.h>
#include <linux/vmalloc.h>
#include <trace/events/block.h>
#include "nvme.h"

bool multipath = true;
module_param(multipath, bool, 0444);
MODULE_PARM_DESC(multipath,
	"turn on native support for multiple controllers per subsystem");

static const char *nvme_iopolicy_names[] = {
	[NVME_IOPOLICY_NUMA]	= "numa",
	[NVME_IOPOLICY_RR]	= "round-robin",
};

static int iopolicy = NVME_IOPOLICY_NUMA;

static int nvme_set_iopolicy(const char *val, const struct kernel_param *kp)
{
	if (!val)
		return -EINVAL;
	if (!strncmp(val, "numa", 4))
		iopolicy = NVME_IOPOLICY_NUMA;
	else if (!strncmp(val, "round-robin", 11))
		iopolicy = NVME_IOPOLICY_RR;
	else
		return -EINVAL;

	return 0;
}

static int nvme_get_iopolicy(char *buf, const struct kernel_param *kp)
{
	return sprintf(buf, "%s\n", nvme_iopolicy_names[iopolicy]);
}

module_param_call(iopolicy, nvme_set_iopolicy, nvme_get_iopolicy,
	&iopolicy, 0644);
MODULE_PARM_DESC(iopolicy,
	"Default multipath I/O policy; 'numa' (default) or 'round-robin'");

void nvme_mpath_default_iopolicy(struct nvme_subsystem *subsys)
{
	subsys->iopolicy = iopolicy;
}

void nvme_mpath_unfreeze(struct nvme_subsystem *subsys)
{
	struct nvme_ns_head *h;

	lockdep_assert_held(&subsys->lock);
	list_for_each_entry(h, &subsys->nsheads, entry)
		if (h->disk)
			blk_mq_unfreeze_queue(h->disk->queue);
}

void nvme_mpath_wait_freeze(struct nvme_subsystem *subsys)
{
	struct nvme_ns_head *h;

	lockdep_assert_held(&subsys->lock);
	list_for_each_entry(h, &subsys->nsheads, entry)
		if (h->disk)
			blk_mq_freeze_queue_wait(h->disk->queue);
}

void nvme_mpath_start_freeze(struct nvme_subsystem *subsys)
{
	struct nvme_ns_head *h;

	lockdep_assert_held(&subsys->lock);
	list_for_each_entry(h, &subsys->nsheads, entry)
		if (h->disk)
			blk_freeze_queue_start(h->disk->queue);
}

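/*
 * Fail over a request that completed with a path-related error: detach its
 * bios, queue them on the ns_head requeue list for resubmission via another
 * path, and kick off an ANA log re-read if this was an ANA error.
 */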
void nvme_failover_req(struct request *req)
{
	struct nvme_ns *ns = req->q->queuedata;
	u16 status = nvme_req(req)->status & 0x7ff;
	unsigned long flags;
	struct bio *bio;

	nvme_mpath_clear_current_path(ns);

	/*
	 * If we got back an ANA error, we know the controller is alive but not
	 * ready to serve this namespace.  Kick off a re-read of the ANA
	 * information page, and just try any other available path for now.
	 */
	if (nvme_is_ana_error(status) && ns->ctrl->ana_log_buf) {
		set_bit(NVME_NS_ANA_PENDING, &ns->flags);
		queue_work(nvme_wq, &ns->ctrl->ana_work);
	}

	spin_lock_irqsave(&ns->head->requeue_lock, flags);
	for (bio = req->bio; bio; bio = bio->bi_next) {
		bio_set_dev(bio, ns->head->disk->part0);
		if (bio->bi_opf & REQ_POLLED) {
			bio->bi_opf &= ~REQ_POLLED;
			bio->bi_cookie = BLK_QC_T_NONE;
		}
	}
	blk_steal_bios(&ns->head->requeue_list, req);
	spin_unlock_irqrestore(&ns->head->requeue_lock, flags);

	blk_mq_end_request(req, 0);
	kblockd_schedule_work(&ns->head->requeue_work);
}

void nvme_mpath_start_request(struct request *rq)
{
	struct nvme_ns *ns = rq->q->queuedata;
	struct gendisk *disk = ns->head->disk;

	if (!blk_queue_io_stat(disk->queue) || blk_rq_is_passthrough(rq))
		return;

	nvme_req(rq)->flags |= NVME_MPATH_IO_STATS;
	nvme_req(rq)->start_time = bdev_start_io_acct(disk->part0, req_op(rq),
						      jiffies);
}
EXPORT_SYMBOL_GPL(nvme_mpath_start_request);

void nvme_mpath_end_request(struct request *rq)
{
	struct nvme_ns *ns = rq->q->queuedata;

	if (!(nvme_req(rq)->flags & NVME_MPATH_IO_STATS))
		return;
	bdev_end_io_acct(ns->head->disk->part0, req_op(rq),
			 blk_rq_bytes(rq) >> SECTOR_SHIFT,
			 nvme_req(rq)->start_time);
}

void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns;

	down_read(&ctrl->namespaces_rwsem);
	list_for_each_entry(ns, &ctrl->namespaces, list) {
		if (!ns->head->disk)
			continue;
		kblockd_schedule_work(&ns->head->requeue_work);
		if (ctrl->state == NVME_CTRL_LIVE)
			disk_uevent(ns->head->disk, KOBJ_CHANGE);
	}
	up_read(&ctrl->namespaces_rwsem);
}

static const char *nvme_ana_state_names[] = {
	[0]				= "invalid state",
	[NVME_ANA_OPTIMIZED]		= "optimized",
	[NVME_ANA_NONOPTIMIZED]		= "non-optimized",
	[NVME_ANA_INACCESSIBLE]		= "inaccessible",
	[NVME_ANA_PERSISTENT_LOSS]	= "persistent-loss",
	[NVME_ANA_CHANGE]		= "change",
};

bool nvme_mpath_clear_current_path(struct nvme_ns *ns)
{
	struct nvme_ns_head *head = ns->head;
	bool changed = false;
	int node;

	if (!head)
		goto out;

	for_each_node(node) {
		if (ns == rcu_access_pointer(head->current_path[node])) {
			rcu_assign_pointer(head->current_path[node], NULL);
			changed = true;
		}
	}
out:
	return changed;
}

void nvme_mpath_clear_ctrl_paths(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns;

	down_read(&ctrl->namespaces_rwsem);
	list_for_each_entry(ns, &ctrl->namespaces, list) {
		nvme_mpath_clear_current_path(ns);
		kblockd_schedule_work(&ns->head->requeue_work);
	}
	up_read(&ctrl->namespaces_rwsem);
}

void nvme_mpath_revalidate_paths(struct nvme_ns *ns)
{
	struct nvme_ns_head *head = ns->head;
	sector_t capacity = get_capacity(head->disk);
	int node;
	int srcu_idx;

	srcu_idx = srcu_read_lock(&head->srcu);
	list_for_each_entry_rcu(ns, &head->list, siblings) {
		if (capacity != get_capacity(ns->disk))
			clear_bit(NVME_NS_READY, &ns->flags);
	}
	srcu_read_unlock(&head->srcu, srcu_idx);

	for_each_node(node)
		rcu_assign_pointer(head->current_path[node], NULL);
	kblockd_schedule_work(&head->requeue_work);
}

static bool nvme_path_is_disabled(struct nvme_ns *ns)
{
	/*
	 * We don't treat NVME_CTRL_DELETING as a disabled path as I/O should
	 * still be able to complete assuming that the controller is connected.
	 * Otherwise it will fail immediately and return to the requeue list.
	 */
	if (ns->ctrl->state != NVME_CTRL_LIVE &&
	    ns->ctrl->state != NVME_CTRL_DELETING)
		return true;
	if (test_bit(NVME_NS_ANA_PENDING, &ns->flags) ||
	    !test_bit(NVME_NS_READY, &ns->flags))
		return true;
	return false;
}

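/*
 * Pick the path closest to @node (all paths are equidistant for the
 * round-robin policy), preferring ANA-optimized paths over non-optimized
 * ones, and cache the result in head->current_path[node].
 */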
static struct nvme_ns *__nvme_find_path(struct nvme_ns_head *head, int node)
{
	int found_distance = INT_MAX, fallback_distance = INT_MAX, distance;
	struct nvme_ns *found = NULL, *fallback = NULL, *ns;

	list_for_each_entry_rcu(ns, &head->list, siblings) {
		if (nvme_path_is_disabled(ns))
			continue;

		if (READ_ONCE(head->subsys->iopolicy) == NVME_IOPOLICY_NUMA)
			distance = node_distance(node, ns->ctrl->numa_node);
		else
			distance = LOCAL_DISTANCE;

		switch (ns->ana_state) {
		case NVME_ANA_OPTIMIZED:
			if (distance < found_distance) {
				found_distance = distance;
				found = ns;
			}
			break;
		case NVME_ANA_NONOPTIMIZED:
			if (distance < fallback_distance) {
				fallback_distance = distance;
				fallback = ns;
			}
			break;
		default:
			break;
		}
	}

	if (!found)
		found = fallback;
	if (found)
		rcu_assign_pointer(head->current_path[node], found);
	return found;
}

static struct nvme_ns *nvme_next_ns(struct nvme_ns_head *head,
		struct nvme_ns *ns)
{
	ns = list_next_or_null_rcu(&head->list, &ns->siblings, struct nvme_ns,
			siblings);
	if (ns)
		return ns;
	return list_first_or_null_rcu(&head->list, struct nvme_ns, siblings);
}

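/*
 * Round-robin policy: advance to the next usable path after @old, preferring
 * ANA-optimized paths, and only fall back to @old itself when no other path
 * is a better choice.
 */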
static struct nvme_ns *nvme_round_robin_path(struct nvme_ns_head *head,
		int node, struct nvme_ns *old)
{
	struct nvme_ns *ns, *found = NULL;

	if (list_is_singular(&head->list)) {
		if (nvme_path_is_disabled(old))
			return NULL;
		return old;
	}

	for (ns = nvme_next_ns(head, old);
	     ns && ns != old;
	     ns = nvme_next_ns(head, ns)) {
		if (nvme_path_is_disabled(ns))
			continue;

		if (ns->ana_state == NVME_ANA_OPTIMIZED) {
			found = ns;
			goto out;
		}
		if (ns->ana_state == NVME_ANA_NONOPTIMIZED)
			found = ns;
	}

	/*
	 * The loop above skips the current path for round-robin semantics.
	 * Fall back to the current path if either:
	 *  - no other optimized path found and current is optimized,
	 *  - no other usable path found and current is usable.
	 */
	if (!nvme_path_is_disabled(old) &&
	    (old->ana_state == NVME_ANA_OPTIMIZED ||
	     (!found && old->ana_state == NVME_ANA_NONOPTIMIZED)))
		return old;

	if (!found)
		return NULL;
out:
	rcu_assign_pointer(head->current_path[node], found);
	return found;
}

static inline bool nvme_path_is_optimized(struct nvme_ns *ns)
{
	return ns->ctrl->state == NVME_CTRL_LIVE &&
		ns->ana_state == NVME_ANA_OPTIMIZED;
}

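/*
 * Return the cached path for the local node if it is still optimized and
 * usable; otherwise (or always, for the round-robin policy) run path
 * selection again.
 */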
inline struct nvme_ns *nvme_find_path(struct nvme_ns_head *head)
{
	int node = numa_node_id();
	struct nvme_ns *ns;

	ns = srcu_dereference(head->current_path[node], &head->srcu);
	if (unlikely(!ns))
		return __nvme_find_path(head, node);

	if (READ_ONCE(head->subsys->iopolicy) == NVME_IOPOLICY_RR)
		return nvme_round_robin_path(head, node, ns);
	if (unlikely(!nvme_path_is_optimized(ns)))
		return __nvme_find_path(head, node);
	return ns;
}

static bool nvme_available_path(struct nvme_ns_head *head)
{
	struct nvme_ns *ns;

	list_for_each_entry_rcu(ns, &head->list, siblings) {
		if (test_bit(NVME_CTRL_FAILFAST_EXPIRED, &ns->ctrl->flags))
			continue;
		switch (ns->ctrl->state) {
		case NVME_CTRL_LIVE:
		case NVME_CTRL_RESETTING:
		case NVME_CTRL_CONNECTING:
			return true;
		default:
			break;
		}
	}
	return false;
}

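/*
 * Submit a bio to the ns_head device: remap it to a usable path if one
 * exists, requeue it if all paths are currently unavailable but may come
 * back, and fail it only when no path is left at all.
 */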
static void nvme_ns_head_submit_bio(struct bio *bio)
{
	struct nvme_ns_head *head = bio->bi_bdev->bd_disk->private_data;
	struct device *dev = disk_to_dev(head->disk);
	struct nvme_ns *ns;
	int srcu_idx;

	/*
	 * The namespace might be going away and the bio might be moved to a
	 * different queue via blk_steal_bios(), so we need to use the bio_split
	 * pool from the original queue to allocate the bvecs from.
	 */
	bio = bio_split_to_limits(bio);
	if (!bio)
		return;

	srcu_idx = srcu_read_lock(&head->srcu);
	ns = nvme_find_path(head);
	if (likely(ns)) {
		bio_set_dev(bio, ns->disk->part0);
		bio->bi_opf |= REQ_NVME_MPATH;
		trace_block_bio_remap(bio, disk_devt(ns->head->disk),
				      bio->bi_iter.bi_sector);
		submit_bio_noacct(bio);
	} else if (nvme_available_path(head)) {
		dev_warn_ratelimited(dev, "no usable path - requeuing I/O\n");

		spin_lock_irq(&head->requeue_lock);
		bio_list_add(&head->requeue_list, bio);
		spin_unlock_irq(&head->requeue_lock);
	} else {
		dev_warn_ratelimited(dev, "no available path - failing I/O\n");

		bio_io_error(bio);
	}

	srcu_read_unlock(&head->srcu, srcu_idx);
}

static int nvme_ns_head_open(struct gendisk *disk, fmode_t mode)
{
	if (!nvme_tryget_ns_head(disk->private_data))
		return -ENXIO;
	return 0;
}

static void nvme_ns_head_release(struct gendisk *disk, fmode_t mode)
{
	nvme_put_ns_head(disk->private_data);
}

#ifdef CONFIG_BLK_DEV_ZONED
static int nvme_ns_head_report_zones(struct gendisk *disk, sector_t sector,
		unsigned int nr_zones, report_zones_cb cb, void *data)
{
	struct nvme_ns_head *head = disk->private_data;
	struct nvme_ns *ns;
	int srcu_idx, ret = -EWOULDBLOCK;

	srcu_idx = srcu_read_lock(&head->srcu);
	ns = nvme_find_path(head);
	if (ns)
		ret = nvme_ns_report_zones(ns, sector, nr_zones, cb, data);
	srcu_read_unlock(&head->srcu, srcu_idx);
	return ret;
}
#else
#define nvme_ns_head_report_zones	NULL
#endif /* CONFIG_BLK_DEV_ZONED */

const struct block_device_operations nvme_ns_head_ops = {
	.owner		= THIS_MODULE,
	.submit_bio	= nvme_ns_head_submit_bio,
	.open		= nvme_ns_head_open,
	.release	= nvme_ns_head_release,
	.ioctl		= nvme_ns_head_ioctl,
	.compat_ioctl	= blkdev_compat_ptr_ioctl,
	.getgeo		= nvme_getgeo,
	.report_zones	= nvme_ns_head_report_zones,
	.pr_ops		= &nvme_pr_ops,
};

static inline struct nvme_ns_head *cdev_to_ns_head(struct cdev *cdev)
{
	return container_of(cdev, struct nvme_ns_head, cdev);
}

static int nvme_ns_head_chr_open(struct inode *inode, struct file *file)
{
	if (!nvme_tryget_ns_head(cdev_to_ns_head(inode->i_cdev)))
		return -ENXIO;
	return 0;
}

static int nvme_ns_head_chr_release(struct inode *inode, struct file *file)
{
	nvme_put_ns_head(cdev_to_ns_head(inode->i_cdev));
	return 0;
}

static const struct file_operations nvme_ns_head_chr_fops = {
	.owner		= THIS_MODULE,
	.open		= nvme_ns_head_chr_open,
	.release	= nvme_ns_head_chr_release,
	.unlocked_ioctl	= nvme_ns_head_chr_ioctl,
	.compat_ioctl	= compat_ptr_ioctl,
	.uring_cmd	= nvme_ns_head_chr_uring_cmd,
	.uring_cmd_iopoll = nvme_ns_head_chr_uring_cmd_iopoll,
};

static int nvme_add_ns_head_cdev(struct nvme_ns_head *head)
{
	int ret;

	head->cdev_device.parent = &head->subsys->dev;
	ret = dev_set_name(&head->cdev_device, "ng%dn%d",
			   head->subsys->instance, head->instance);
	if (ret)
		return ret;
	ret = nvme_cdev_add(&head->cdev, &head->cdev_device,
			    &nvme_ns_head_chr_fops, THIS_MODULE);
	return ret;
}

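/*
 * Resubmit all bios parked on the ns_head requeue list, e.g. after a
 * failover or once a usable path has (re)appeared.
 */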
static void nvme_requeue_work(struct work_struct *work)
{
	struct nvme_ns_head *head =
		container_of(work, struct nvme_ns_head, requeue_work);
	struct bio *bio, *next;

	spin_lock_irq(&head->requeue_lock);
	next = bio_list_get(&head->requeue_list);
	spin_unlock_irq(&head->requeue_lock);

	while ((bio = next) != NULL) {
		next = bio->bi_next;
		bio->bi_next = NULL;

		submit_bio_noacct(bio);
	}
}

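/*
 * Allocate the multipath (ns_head) gendisk and set up its queue limits and
 * flags.  Returns 0 without allocating a disk when the subsystem cannot have
 * multiple controllers or native multipath is disabled.
 */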
int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head)
{
	bool vwc = false;

	mutex_init(&head->lock);
	bio_list_init(&head->requeue_list);
	spin_lock_init(&head->requeue_lock);
	INIT_WORK(&head->requeue_work, nvme_requeue_work);

	/*
	 * Add a multipath node if the subsystem supports multiple controllers.
	 * We also do this for private namespaces as the namespace sharing flag
	 * could change after a rescan.
	 */
	if (!(ctrl->subsys->cmic & NVME_CTRL_CMIC_MULTI_CTRL) ||
	    !nvme_is_unique_nsid(ctrl, head) || !multipath)
		return 0;

	head->disk = blk_alloc_disk(ctrl->numa_node);
	if (!head->disk)
		return -ENOMEM;
	head->disk->fops = &nvme_ns_head_ops;
	head->disk->private_data = head;
	sprintf(head->disk->disk_name, "nvme%dn%d",
			ctrl->subsys->instance, head->instance);

	blk_queue_flag_set(QUEUE_FLAG_NONROT, head->disk->queue);
	blk_queue_flag_set(QUEUE_FLAG_NOWAIT, head->disk->queue);
	blk_queue_flag_set(QUEUE_FLAG_IO_STAT, head->disk->queue);
	/*
	 * This assumes all controllers that refer to a namespace either
	 * support poll queues or not.  That is not a strict guarantee,
	 * but if the assumption is wrong the effect is only suboptimal
	 * performance, not a correctness problem.
	 */
	if (ctrl->tagset->nr_maps > HCTX_TYPE_POLL &&
	    ctrl->tagset->map[HCTX_TYPE_POLL].nr_queues)
		blk_queue_flag_set(QUEUE_FLAG_POLL, head->disk->queue);

	/* set to a default value of 512 until the disk is validated */
	blk_queue_logical_block_size(head->disk->queue, 512);
	blk_set_stacking_limits(&head->disk->queue->limits);
	blk_queue_dma_alignment(head->disk->queue, 3);

	/* we need to propagate up the VWC settings */
	if (ctrl->vwc & NVME_CTRL_VWC_PRESENT)
		vwc = true;
	blk_queue_write_cache(head->disk->queue, vwc, vwc);
	return 0;
}

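/*
 * Called when a path becomes usable: register the ns_head gendisk and char
 * device on the first call, pre-populate the per-node path cache for
 * optimized paths, and kick the requeue list.
 */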
static void nvme_mpath_set_live(struct nvme_ns *ns)
{
	struct nvme_ns_head *head = ns->head;
	int rc;

	if (!head->disk)
		return;

	/*
	 * test_and_set_bit() is used because it is protecting against two nvme
	 * paths simultaneously calling device_add_disk() on the same namespace
	 * head.
	 */
	if (!test_and_set_bit(NVME_NSHEAD_DISK_LIVE, &head->flags)) {
		rc = device_add_disk(&head->subsys->dev, head->disk,
				     nvme_ns_id_attr_groups);
		if (rc) {
			clear_bit(NVME_NSHEAD_DISK_LIVE, &head->flags);
			return;
		}
		nvme_add_ns_head_cdev(head);
	}

	mutex_lock(&head->lock);
	if (nvme_path_is_optimized(ns)) {
		int node, srcu_idx;

		srcu_idx = srcu_read_lock(&head->srcu);
		for_each_node(node)
			__nvme_find_path(head, node);
		srcu_read_unlock(&head->srcu, srcu_idx);
	}
	mutex_unlock(&head->lock);

	synchronize_srcu(&head->srcu);
	kblockd_schedule_work(&head->requeue_work);
}

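/*
 * Walk all group descriptors in the ANA log buffer, sanity-checking offsets
 * and fields, and call @cb for each descriptor.  Must be called with
 * ctrl->ana_lock held.
 */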
static int nvme_parse_ana_log(struct nvme_ctrl *ctrl, void *data,
		int (*cb)(struct nvme_ctrl *ctrl, struct nvme_ana_group_desc *,
			void *))
{
	void *base = ctrl->ana_log_buf;
	size_t offset = sizeof(struct nvme_ana_rsp_hdr);
	int error, i;

	lockdep_assert_held(&ctrl->ana_lock);

	for (i = 0; i < le16_to_cpu(ctrl->ana_log_buf->ngrps); i++) {
		struct nvme_ana_group_desc *desc = base + offset;
		u32 nr_nsids;
		size_t nsid_buf_size;

		if (WARN_ON_ONCE(offset > ctrl->ana_log_size - sizeof(*desc)))
			return -EINVAL;

		nr_nsids = le32_to_cpu(desc->nnsids);
		nsid_buf_size = flex_array_size(desc, nsids, nr_nsids);

		if (WARN_ON_ONCE(desc->grpid == 0))
			return -EINVAL;
		if (WARN_ON_ONCE(le32_to_cpu(desc->grpid) > ctrl->anagrpmax))
			return -EINVAL;
		if (WARN_ON_ONCE(desc->state == 0))
			return -EINVAL;
		if (WARN_ON_ONCE(desc->state > NVME_ANA_CHANGE))
			return -EINVAL;

		offset += sizeof(*desc);
		if (WARN_ON_ONCE(offset > ctrl->ana_log_size - nsid_buf_size))
			return -EINVAL;

		error = cb(ctrl, desc, data);
		if (error)
			return error;

		offset += nsid_buf_size;
	}

	return 0;
}

static inline bool nvme_state_is_live(enum nvme_ana_state state)
{
	return state == NVME_ANA_OPTIMIZED || state == NVME_ANA_NONOPTIMIZED;
}

static void nvme_update_ns_ana_state(struct nvme_ana_group_desc *desc,
		struct nvme_ns *ns)
{
	ns->ana_grpid = le32_to_cpu(desc->grpid);
	ns->ana_state = desc->state;
	clear_bit(NVME_NS_ANA_PENDING, &ns->flags);
	/*
	 * nvme_mpath_set_live() will trigger I/O to the multipath path device
	 * and in turn to this path device.  However we cannot accept this I/O
	 * if the controller is not live.  This may deadlock if called from
	 * nvme_mpath_init_identify() and the ctrl will never complete
	 * initialization, preventing I/O from completing.  For this case we
	 * will reprocess the ANA log page in nvme_mpath_update() once the
	 * controller is ready.
	 */
	if (nvme_state_is_live(ns->ana_state) &&
	    ns->ctrl->state == NVME_CTRL_LIVE)
		nvme_mpath_set_live(ns);
}

static int nvme_update_ana_state(struct nvme_ctrl *ctrl,
		struct nvme_ana_group_desc *desc, void *data)
{
	u32 nr_nsids = le32_to_cpu(desc->nnsids), n = 0;
	unsigned *nr_change_groups = data;
	struct nvme_ns *ns;

	dev_dbg(ctrl->device, "ANA group %d: %s.\n",
			le32_to_cpu(desc->grpid),
			nvme_ana_state_names[desc->state]);

	if (desc->state == NVME_ANA_CHANGE)
		(*nr_change_groups)++;

	if (!nr_nsids)
		return 0;

	down_read(&ctrl->namespaces_rwsem);
	list_for_each_entry(ns, &ctrl->namespaces, list) {
		unsigned nsid;
again:
		nsid = le32_to_cpu(desc->nsids[n]);
		if (ns->head->ns_id < nsid)
			continue;
		if (ns->head->ns_id == nsid)
			nvme_update_ns_ana_state(desc, ns);
		if (++n == nr_nsids)
			break;
		if (ns->head->ns_id > nsid)
			goto again;
	}
	up_read(&ctrl->namespaces_rwsem);
	return 0;
}

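/*
 * Read the ANA log page from the controller and apply it to all namespaces.
 * If any group is in the change state, (re-)arm the ANATT timer so a stuck
 * transition eventually resets the controller.
 */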
static int nvme_read_ana_log(struct nvme_ctrl *ctrl)
{
	u32 nr_change_groups = 0;
	int error;

	mutex_lock(&ctrl->ana_lock);
	error = nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_ANA, 0, NVME_CSI_NVM,
			ctrl->ana_log_buf, ctrl->ana_log_size, 0);
	if (error) {
		dev_warn(ctrl->device, "Failed to get ANA log: %d\n", error);
		goto out_unlock;
	}

	error = nvme_parse_ana_log(ctrl, &nr_change_groups,
			nvme_update_ana_state);
	if (error)
		goto out_unlock;

	/*
	 * In theory we should have an ANATT timer per group as they might enter
	 * the change state at different times.  But that is a lot of overhead
	 * just to protect against a target that keeps entering new change
	 * states while never finishing previous ones.  We'll still eventually
	 * time out once all groups are in change state, so this isn't a big
	 * deal.
	 *
	 * We also double the ANATT value to provide some slack for transports
	 * or AEN processing overhead.
	 */
	if (nr_change_groups)
		mod_timer(&ctrl->anatt_timer, ctrl->anatt * HZ * 2 + jiffies);
	else
		del_timer_sync(&ctrl->anatt_timer);
out_unlock:
	mutex_unlock(&ctrl->ana_lock);
	return error;
}

static void nvme_ana_work(struct work_struct *work)
{
	struct nvme_ctrl *ctrl = container_of(work, struct nvme_ctrl, ana_work);

	if (ctrl->state != NVME_CTRL_LIVE)
		return;

	nvme_read_ana_log(ctrl);
}

void nvme_mpath_update(struct nvme_ctrl *ctrl)
{
	u32 nr_change_groups = 0;

	if (!ctrl->ana_log_buf)
		return;

	mutex_lock(&ctrl->ana_lock);
	nvme_parse_ana_log(ctrl, &nr_change_groups, nvme_update_ana_state);
	mutex_unlock(&ctrl->ana_lock);
}

static void nvme_anatt_timeout(struct timer_list *t)
{
	struct nvme_ctrl *ctrl = from_timer(ctrl, t, anatt_timer);

	dev_info(ctrl->device, "ANATT timeout, resetting controller.\n");
	nvme_reset_ctrl(ctrl);
}

void nvme_mpath_stop(struct nvme_ctrl *ctrl)
{
	if (!nvme_ctrl_use_ana(ctrl))
		return;
	del_timer_sync(&ctrl->anatt_timer);
	cancel_work_sync(&ctrl->ana_work);
}

#define SUBSYS_ATTR_RW(_name, _mode, _show, _store)  \
	struct device_attribute subsys_attr_##_name =	\
		__ATTR(_name, _mode, _show, _store)

static ssize_t nvme_subsys_iopolicy_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvme_subsystem *subsys =
		container_of(dev, struct nvme_subsystem, dev);

	return sysfs_emit(buf, "%s\n",
			  nvme_iopolicy_names[READ_ONCE(subsys->iopolicy)]);
}

static ssize_t nvme_subsys_iopolicy_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct nvme_subsystem *subsys =
		container_of(dev, struct nvme_subsystem, dev);
	int i;

	for (i = 0; i < ARRAY_SIZE(nvme_iopolicy_names); i++) {
		if (sysfs_streq(buf, nvme_iopolicy_names[i])) {
			WRITE_ONCE(subsys->iopolicy, i);
			return count;
		}
	}

	return -EINVAL;
}
SUBSYS_ATTR_RW(iopolicy, S_IRUGO | S_IWUSR,
	       nvme_subsys_iopolicy_show, nvme_subsys_iopolicy_store);

static ssize_t ana_grpid_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	return sysfs_emit(buf, "%d\n", nvme_get_ns_from_dev(dev)->ana_grpid);
}
DEVICE_ATTR_RO(ana_grpid);

static ssize_t ana_state_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	struct nvme_ns *ns = nvme_get_ns_from_dev(dev);

	return sysfs_emit(buf, "%s\n", nvme_ana_state_names[ns->ana_state]);
}
DEVICE_ATTR_RO(ana_state);

static int nvme_lookup_ana_group_desc(struct nvme_ctrl *ctrl,
		struct nvme_ana_group_desc *desc, void *data)
{
	struct nvme_ana_group_desc *dst = data;

	if (desc->grpid != dst->grpid)
		return 0;

	*dst = *desc;
	return -ENXIO; /* just break out of the loop */
}

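/*
 * Hook a namespace (path) up to multipath handling: look up and apply its
 * ANA group state, or mark it optimized right away when the controller does
 * not use ANA.
 */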
void nvme_mpath_add_disk(struct nvme_ns *ns, __le32 anagrpid)
{
	if (nvme_ctrl_use_ana(ns->ctrl)) {
		struct nvme_ana_group_desc desc = {
			.grpid = anagrpid,
			.state = 0,
		};

		mutex_lock(&ns->ctrl->ana_lock);
		ns->ana_grpid = le32_to_cpu(anagrpid);
		nvme_parse_ana_log(ns->ctrl, &desc, nvme_lookup_ana_group_desc);
		mutex_unlock(&ns->ctrl->ana_lock);
		if (desc.state) {
			/* found the group desc: update */
			nvme_update_ns_ana_state(&desc, ns);
		} else {
			/* group desc not found: trigger a re-read */
			set_bit(NVME_NS_ANA_PENDING, &ns->flags);
			queue_work(nvme_wq, &ns->ctrl->ana_work);
		}
	} else {
		ns->ana_state = NVME_ANA_OPTIMIZED;
		nvme_mpath_set_live(ns);
	}

	if (blk_queue_stable_writes(ns->queue) && ns->head->disk)
		blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES,
				   ns->head->disk->queue);
#ifdef CONFIG_BLK_DEV_ZONED
	if (blk_queue_is_zoned(ns->queue) && ns->head->disk)
		ns->head->disk->nr_zones = ns->disk->nr_zones;
#endif
}

void nvme_mpath_shutdown_disk(struct nvme_ns_head *head)
{
	if (!head->disk)
		return;
	kblockd_schedule_work(&head->requeue_work);
	if (test_bit(NVME_NSHEAD_DISK_LIVE, &head->flags)) {
		nvme_cdev_del(&head->cdev, &head->cdev_device);
		del_gendisk(head->disk);
	}
}

void nvme_mpath_remove_disk(struct nvme_ns_head *head)
{
	if (!head->disk)
		return;
	blk_mark_disk_dead(head->disk);
	/* make sure all pending bios are cleaned up */
	kblockd_schedule_work(&head->requeue_work);
	flush_work(&head->requeue_work);
	put_disk(head->disk);
}

void nvme_mpath_init_ctrl(struct nvme_ctrl *ctrl)
{
	mutex_init(&ctrl->ana_lock);
	timer_setup(&ctrl->anatt_timer, nvme_anatt_timeout, 0);
	INIT_WORK(&ctrl->ana_work, nvme_ana_work);
}

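/*
 * Set up ANA support from the Identify Controller data: validate MNAN,
 * size and allocate the ANA log buffer, and do an initial ANA log read.
 */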
int nvme_mpath_init_identify(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
{
	size_t max_transfer_size = ctrl->max_hw_sectors << SECTOR_SHIFT;
	size_t ana_log_size;
	int error = 0;

	/* check if multipath is enabled and we have the capability */
	if (!multipath || !ctrl->subsys ||
	    !(ctrl->subsys->cmic & NVME_CTRL_CMIC_ANA))
		return 0;

	if (!ctrl->max_namespaces ||
	    ctrl->max_namespaces > le32_to_cpu(id->nn)) {
		dev_err(ctrl->device,
			"Invalid MNAN value %u\n", ctrl->max_namespaces);
		return -EINVAL;
	}

	ctrl->anacap = id->anacap;
	ctrl->anatt = id->anatt;
	ctrl->nanagrpid = le32_to_cpu(id->nanagrpid);
	ctrl->anagrpmax = le32_to_cpu(id->anagrpmax);

	ana_log_size = sizeof(struct nvme_ana_rsp_hdr) +
		ctrl->nanagrpid * sizeof(struct nvme_ana_group_desc) +
		ctrl->max_namespaces * sizeof(__le32);
	if (ana_log_size > max_transfer_size) {
		dev_err(ctrl->device,
			"ANA log page size (%zd) larger than MDTS (%zd).\n",
			ana_log_size, max_transfer_size);
		dev_err(ctrl->device, "disabling ANA support.\n");
		goto out_uninit;
	}
	if (ana_log_size > ctrl->ana_log_size) {
		nvme_mpath_stop(ctrl);
		nvme_mpath_uninit(ctrl);
		ctrl->ana_log_buf = kvmalloc(ana_log_size, GFP_KERNEL);
		if (!ctrl->ana_log_buf)
			return -ENOMEM;
	}
	ctrl->ana_log_size = ana_log_size;
	error = nvme_read_ana_log(ctrl);
	if (error)
		goto out_uninit;
	return 0;

out_uninit:
	nvme_mpath_uninit(ctrl);
	return error;
}

void nvme_mpath_uninit(struct nvme_ctrl *ctrl)
{
	kvfree(ctrl->ana_log_buf);
	ctrl->ana_log_buf = NULL;
	ctrl->ana_log_size = 0;
}