// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * multipath.c : Multiple Devices driver for Linux
 *
 * Copyright (C) 1999, 2000, 2001 Ingo Molnar, Red Hat
 *
 * Copyright (C) 1996, 1997, 1998 Ingo Molnar, Miguel de Icaza, Gadi Oxman
 *
 * MULTIPATH management functions.
 *
 * derived from raid1.c.
 */
#include <linux/blkdev.h>
#include <linux/module.h>
#include <linux/raid/md_u.h>
#include <linux/seq_file.h>
#include <linux/slab.h>

#include "md.h"
#include "md-multipath.h"

#define MAX_WORK_PER_DISK	128

#define NR_RESERVED_BUFS	32
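/*
 * Each bio submitted to this personality is wrapped in a struct
 * multipath_bh drawn from conf->pool; keeping NR_RESERVED_BUFS
 * buffers in reserve lets I/O make forward progress even under
 * memory pressure.
 */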
static int multipath_map (struct mpconf *conf)
{
	int i, disks = conf->raid_disks;

	/*
	 * Later we do read balancing on the read side
	 * now we use the first available disk.
	 */

	rcu_read_lock();
	for (i = 0; i < disks; i++) {
		struct md_rdev *rdev = rcu_dereference(conf->multipaths[i].rdev);
		if (rdev && test_bit(In_sync, &rdev->flags) &&
		    !test_bit(Faulty, &rdev->flags)) {
			atomic_inc(&rdev->nr_pending);
			rcu_read_unlock();
			return i;
		}
	}
	rcu_read_unlock();

	pr_crit_ratelimited("multipath_map(): no more operational IO paths?\n");
	return (-1);
}
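/*
 * Put a failed request on the retry list and wake the managing thread
 * (multipathd) so it can redirect the I/O to another path.
 */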
static void multipath_reschedule_retry (struct multipath_bh *mp_bh)
{
	unsigned long flags;
	struct mddev *mddev = mp_bh->mddev;
	struct mpconf *conf = mddev->private;

	spin_lock_irqsave(&conf->device_lock, flags);
	list_add(&mp_bh->retry_list, &conf->retry_list);
	spin_unlock_irqrestore(&conf->device_lock, flags);
	md_wakeup_thread(mddev->thread);
}
/*
 * multipath_end_bh_io() is called when we have finished servicing a multipathed
 * operation and are ready to return a success/failure code to the buffer
 * cache layer.
 */
static void multipath_end_bh_io(struct multipath_bh *mp_bh, blk_status_t status)
{
	struct bio *bio = mp_bh->master_bio;
	struct mpconf *conf = mp_bh->mddev->private;

	bio->bi_status = status;
	bio_endio(bio);
	mempool_free(mp_bh, &conf->pool);
}
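/*
 * Per-path bio completion: success completes the master bio
 * immediately; a real error (not a failed read-ahead) marks the path
 * faulty via md_error() and queues the request for retry on another
 * path.
 */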
static void multipath_end_request(struct bio *bio)
{
	struct multipath_bh *mp_bh = bio->bi_private;
	struct mpconf *conf = mp_bh->mddev->private;
	struct md_rdev *rdev = conf->multipaths[mp_bh->path].rdev;

	if (!bio->bi_status)
		multipath_end_bh_io(mp_bh, 0);
	else if (!(bio->bi_opf & REQ_RAHEAD)) {
		/*
		 * oops, IO error:
		 */
		char b[BDEVNAME_SIZE];
		md_error (mp_bh->mddev, rdev);
		pr_info("multipath: %s: rescheduling sector %llu\n",
			bdevname(rdev->bdev,b),
			(unsigned long long)bio->bi_iter.bi_sector);
		multipath_reschedule_retry(mp_bh);
	} else
		multipath_end_bh_io(mp_bh, bio->bi_status);
	rdev_dec_pending(rdev, conf->mddev);
}
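/*
 * Main request entry point: pick the first operational path, clone the
 * master bio onto that device (shifted by its data_offset) and submit
 * it with REQ_FAILFAST_TRANSPORT so transport errors fail over quickly
 * rather than being retried on the same path.
 */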
static bool multipath_make_request(struct mddev *mddev, struct bio * bio)
{
	struct mpconf *conf = mddev->private;
	struct multipath_bh * mp_bh;
	struct multipath_info *multipath;

	if (unlikely(bio->bi_opf & REQ_PREFLUSH)
	    && md_flush_request(mddev, bio))
		return true;

	mp_bh = mempool_alloc(&conf->pool, GFP_NOIO);

	mp_bh->master_bio = bio;
	mp_bh->mddev = mddev;

	mp_bh->path = multipath_map(conf);
	if (mp_bh->path < 0) {
		bio_io_error(bio);
		mempool_free(mp_bh, &conf->pool);
		return true;
	}
	multipath = conf->multipaths + mp_bh->path;

	bio_init(&mp_bh->bio, NULL, 0);
	__bio_clone_fast(&mp_bh->bio, bio);

	mp_bh->bio.bi_iter.bi_sector += multipath->rdev->data_offset;
	bio_set_dev(&mp_bh->bio, multipath->rdev->bdev);
	mp_bh->bio.bi_opf |= REQ_FAILFAST_TRANSPORT;
	mp_bh->bio.bi_end_io = multipath_end_request;
	mp_bh->bio.bi_private = mp_bh;
	mddev_check_writesame(mddev, &mp_bh->bio);
	mddev_check_write_zeroes(mddev, &mp_bh->bio);
	generic_make_request(&mp_bh->bio);
	return true;
}
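/*
 * /proc/mdstat output: "U" for each in-sync path, "_" for a failed
 * one, e.g. [2/1] [U_] for a two-path array with one path down.
 */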
static void multipath_status(struct seq_file *seq, struct mddev *mddev)
{
	struct mpconf *conf = mddev->private;
	int i;

	seq_printf (seq, " [%d/%d] [", conf->raid_disks,
		    conf->raid_disks - mddev->degraded);
	rcu_read_lock();
	for (i = 0; i < conf->raid_disks; i++) {
		struct md_rdev *rdev = rcu_dereference(conf->multipaths[i].rdev);
		seq_printf (seq, "%s", rdev && test_bit(In_sync, &rdev->flags) ? "U" : "_");
	}
	rcu_read_unlock();
	seq_putc(seq, ']');
}
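/*
 * As in multipath_map(), only the first usable path is consulted:
 * the array reports congestion based on that path's backing device.
 */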
static int multipath_congested(struct mddev *mddev, int bits)
{
	struct mpconf *conf = mddev->private;
	int i, ret = 0;

	rcu_read_lock();
	for (i = 0; i < mddev->raid_disks ; i++) {
		struct md_rdev *rdev = rcu_dereference(conf->multipaths[i].rdev);
		if (rdev && !test_bit(Faulty, &rdev->flags)) {
			struct request_queue *q = bdev_get_queue(rdev->bdev);

			ret |= bdi_congested(q->backing_dev_info, bits);
			/* Just like multipath_map, we just check the
			 * first available device
			 */
			break;
		}
	}
	rcu_read_unlock();
	return ret;
}
/*
 * Careful, this can execute in IRQ contexts as well!
 */
static void multipath_error (struct mddev *mddev, struct md_rdev *rdev)
{
	struct mpconf *conf = mddev->private;
	char b[BDEVNAME_SIZE];

	if (conf->raid_disks - mddev->degraded <= 1) {
		/*
		 * Uh oh, we can do nothing if this is our last path, but
		 * first check if this is a queued request for a device
		 * which has just failed.
		 */
		pr_warn("multipath: only one IO path left and IO error.\n");
		/* leave it active... it's all we have */
		return;
	}
	/*
	 * Mark disk as unusable
	 */
	if (test_and_clear_bit(In_sync, &rdev->flags)) {
		unsigned long flags;
		spin_lock_irqsave(&conf->device_lock, flags);
		mddev->degraded++;
		spin_unlock_irqrestore(&conf->device_lock, flags);
	}
	set_bit(Faulty, &rdev->flags);
	set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
	pr_err("multipath: IO failure on %s, disabling IO path.\n"
	       "multipath: Operation continuing on %d IO paths.\n",
	       bdevname(rdev->bdev, b),
	       conf->raid_disks - mddev->degraded);
}
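/*
 * Debug dump of the configuration: working vs. total path counts
 * (wd/rd) followed by per-path operational state.
 */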
static void print_multipath_conf (struct mpconf *conf)
{
	int i;
	struct multipath_info *tmp;

	pr_debug("MULTIPATH conf printout:\n");
	if (!conf) {
		pr_debug("(conf==NULL)\n");
		return;
	}
	pr_debug(" --- wd:%d rd:%d\n", conf->raid_disks - conf->mddev->degraded,
		 conf->raid_disks);

	for (i = 0; i < conf->raid_disks; i++) {
		char b[BDEVNAME_SIZE];
		tmp = conf->multipaths + i;
		if (tmp->rdev)
			pr_debug(" disk%d, o:%d, dev:%s\n",
				 i,!test_bit(Faulty, &tmp->rdev->flags),
				 bdevname(tmp->rdev->bdev,b));
	}
}
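/*
 * Hot-add a path: claim the first free slot (or the slot the rdev
 * requests), stack the new device's queue limits, and publish the
 * rdev pointer under RCU once it is marked In_sync.
 */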
static int multipath_add_disk(struct mddev *mddev, struct md_rdev *rdev)
{
	struct mpconf *conf = mddev->private;
	int err = -EEXIST;
	int path;
	struct multipath_info *p;
	int first = 0;
	int last = mddev->raid_disks - 1;

	if (rdev->raid_disk >= 0)
		first = last = rdev->raid_disk;

	print_multipath_conf(conf);

	for (path = first; path <= last; path++)
		if ((p=conf->multipaths+path)->rdev == NULL) {
			disk_stack_limits(mddev->gendisk, rdev->bdev,
					  rdev->data_offset << 9);

			err = md_integrity_add_rdev(rdev, mddev);
			if (err)
				break;
			spin_lock_irq(&conf->device_lock);
			mddev->degraded--;
			rdev->raid_disk = path;
			set_bit(In_sync, &rdev->flags);
			spin_unlock_irq(&conf->device_lock);
			rcu_assign_pointer(p->rdev, rdev);
			err = 0;
			break;
		}

	print_multipath_conf(conf);

	return err;
}
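/*
 * Hot-remove a path: refuse while it is still In_sync or has I/O
 * pending, then clear the slot and wait for any multipath_map()
 * readers still holding the rdev pointer to drain before the removal
 * becomes final.
 */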
static int multipath_remove_disk(struct mddev *mddev, struct md_rdev *rdev)
{
	struct mpconf *conf = mddev->private;
	int err = 0;
	int number = rdev->raid_disk;
	struct multipath_info *p = conf->multipaths + number;

	print_multipath_conf(conf);

	if (rdev == p->rdev) {
		if (test_bit(In_sync, &rdev->flags) ||
		    atomic_read(&rdev->nr_pending)) {
			pr_warn("hot-remove-disk, slot %d is identified but is still operational!\n", number);
			err = -EBUSY;
			goto abort;
		}
		p->rdev = NULL;
		if (!test_bit(RemoveSynchronized, &rdev->flags)) {
			synchronize_rcu();
			if (atomic_read(&rdev->nr_pending)) {
				/* lost the race, try later */
				err = -EBUSY;
				p->rdev = rdev;
				goto abort;
			}
		}
		err = md_integrity_register(mddev);
	}
abort:

	print_multipath_conf(conf);
	return err;
}
/*
 * This is a kernel thread which:
 *
 *	1.	Retries failed read operations on working multipaths.
 *	2.	Updates the raid superblock when problems are encountered.
 *	3.	Performs writes following reads for array synchronising.
 */
static void multipathd(struct md_thread *thread)
{
	struct mddev *mddev = thread->mddev;
	struct multipath_bh *mp_bh;
	struct bio *bio;
	unsigned long flags;
	struct mpconf *conf = mddev->private;
	struct list_head *head = &conf->retry_list;

	md_check_recovery(mddev);
	for (;;) {
		char b[BDEVNAME_SIZE];
		spin_lock_irqsave(&conf->device_lock, flags);
		if (list_empty(head))
			break;
		mp_bh = list_entry(head->prev, struct multipath_bh, retry_list);
		list_del(head->prev);
		spin_unlock_irqrestore(&conf->device_lock, flags);

		bio = &mp_bh->bio;
		bio->bi_iter.bi_sector = mp_bh->master_bio->bi_iter.bi_sector;

		if ((mp_bh->path = multipath_map (conf))<0) {
			pr_err("multipath: %s: unrecoverable IO read error for block %llu\n",
			       bio_devname(bio, b),
			       (unsigned long long)bio->bi_iter.bi_sector);
			multipath_end_bh_io(mp_bh, BLK_STS_IOERR);
		} else {
			pr_err("multipath: %s: redirecting sector %llu to another IO path\n",
			       bio_devname(bio, b),
			       (unsigned long long)bio->bi_iter.bi_sector);
			*bio = *(mp_bh->master_bio);
			bio->bi_iter.bi_sector +=
				conf->multipaths[mp_bh->path].rdev->data_offset;
			bio_set_dev(bio, conf->multipaths[mp_bh->path].rdev->bdev);
			bio->bi_opf |= REQ_FAILFAST_TRANSPORT;
			bio->bi_end_io = multipath_end_request;
			bio->bi_private = mp_bh;
			generic_make_request(bio);
		}
	}
	spin_unlock_irqrestore(&conf->device_lock, flags);
}
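/*
 * MULTIPATH neither stripes nor mirrors: the array size is just the
 * single-device size, and generic reshape is not supported.
 */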
static sector_t multipath_size(struct mddev *mddev, sector_t sectors, int raid_disks)
{
	WARN_ONCE(sectors || raid_disks,
		  "%s does not support generic reshape\n", __func__);

	return mddev->dev_sectors;
}
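/*
 * Array assembly: validate the personality level, build the private
 * mpconf bookkeeping, count the working paths, and start the
 * multipathd retry thread. Whatever is allocated here is torn down on
 * the error path below or later in multipath_free().
 */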
static int multipath_run (struct mddev *mddev)
{
	struct mpconf *conf;
	int disk_idx;
	struct multipath_info *disk;
	struct md_rdev *rdev;
	int working_disks;
	int ret;

	if (md_check_no_bitmap(mddev))
		return -EINVAL;

	if (mddev->level != LEVEL_MULTIPATH) {
		pr_warn("multipath: %s: raid level not set to multipath IO (%d)\n",
			mdname(mddev), mddev->level);
		goto out;
	}
	/*
	 * copy the already verified devices into our private MULTIPATH
	 * bookkeeping area. [whatever we allocate in multipath_run(),
	 * should be freed in multipath_free()]
	 */

	conf = kzalloc(sizeof(struct mpconf), GFP_KERNEL);
	mddev->private = conf;
	if (!conf)
		goto out;

	conf->multipaths = kcalloc(mddev->raid_disks,
				   sizeof(struct multipath_info),
				   GFP_KERNEL);
	if (!conf->multipaths)
		goto out_free_conf;

	working_disks = 0;
	rdev_for_each(rdev, mddev) {
		disk_idx = rdev->raid_disk;
		if (disk_idx < 0 ||
		    disk_idx >= mddev->raid_disks)
			continue;

		disk = conf->multipaths + disk_idx;
		disk->rdev = rdev;
		disk_stack_limits(mddev->gendisk, rdev->bdev,
				  rdev->data_offset << 9);

		if (!test_bit(Faulty, &rdev->flags))
			working_disks++;
	}

	conf->raid_disks = mddev->raid_disks;
	conf->mddev = mddev;
	spin_lock_init(&conf->device_lock);
	INIT_LIST_HEAD(&conf->retry_list);

	if (!working_disks) {
		pr_warn("multipath: no operational IO paths for %s\n",
			mdname(mddev));
		goto out_free_conf;
	}
	mddev->degraded = conf->raid_disks - working_disks;

	ret = mempool_init_kmalloc_pool(&conf->pool, NR_RESERVED_BUFS,
					sizeof(struct multipath_bh));
	if (ret)
		goto out_free_conf;

	mddev->thread = md_register_thread(multipathd, mddev,
					   "multipath");
	if (!mddev->thread)
		goto out_free_conf;

	pr_info("multipath: array %s active with %d out of %d IO paths\n",
		mdname(mddev), conf->raid_disks - mddev->degraded,
		mddev->raid_disks);

	/*
	 * Ok, everything is just fine now
	 */
	md_set_array_sectors(mddev, multipath_size(mddev, 0, 0));

	if (md_integrity_register(mddev))
		goto out_free_conf;

	return 0;

out_free_conf:
	mempool_exit(&conf->pool);
	kfree(conf->multipaths);
	kfree(conf);
	mddev->private = NULL;
out:
	return -EIO;
}
static void multipath_free(struct mddev *mddev, void *priv)
{
	struct mpconf *conf = priv;

	mempool_exit(&conf->pool);
	kfree(conf->multipaths);
	kfree(conf);
}
static struct md_personality multipath_personality =
{
	.name		= "multipath",
	.level		= LEVEL_MULTIPATH,
	.owner		= THIS_MODULE,
	.make_request	= multipath_make_request,
	.run		= multipath_run,
	.free		= multipath_free,
	.status		= multipath_status,
	.error_handler	= multipath_error,
	.hot_add_disk	= multipath_add_disk,
	.hot_remove_disk= multipath_remove_disk,
	.size		= multipath_size,
	.congested	= multipath_congested,
};
static int __init multipath_init (void)
{
	return register_md_personality (&multipath_personality);
}

static void __exit multipath_exit (void)
{
	unregister_md_personality (&multipath_personality);
}

module_init(multipath_init);
module_exit(multipath_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("simple multi-path personality for MD");
MODULE_ALIAS("md-personality-7"); /* MULTIPATH */
MODULE_ALIAS("md-multipath");
MODULE_ALIAS("md-level--4");