/*
 * Copyright (C) 2003 Sistina Software Limited.
 * Copyright (C) 2005-2008 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */
#include "dm-bio-record.h"

#include <linux/init.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/device-mapper.h>
#include <linux/dm-io.h>
#include <linux/dm-dirty-log.h>
#include <linux/dm-kcopyd.h>
#include <linux/dm-region-hash.h>
#define DM_MSG_PREFIX "raid1"

#define MAX_RECOVERY 1	/* Maximum number of regions recovered in parallel. */

#define DM_RAID1_HANDLE_ERRORS 0x01
#define errors_handled(p)	((p)->features & DM_RAID1_HANDLE_ERRORS)

static DECLARE_WAIT_QUEUE_HEAD(_kmirrord_recovery_stopped);
/*-----------------------------------------------------------------
 * Mirror set structures.
 *---------------------------------------------------------------*/
enum dm_raid1_error {
	DM_RAID1_WRITE_ERROR,
	DM_RAID1_FLUSH_ERROR,
	DM_RAID1_SYNC_ERROR,
	DM_RAID1_READ_ERROR
};

struct mirror {
	struct mirror_set *ms;
	atomic_t error_count;
	unsigned long error_type;
	struct dm_dev *dev;
	sector_t offset;
};

struct mirror_set {
	struct dm_target *ti;
	struct list_head list;

	uint64_t features;

	spinlock_t lock;	/* protects the lists */
	struct bio_list reads;
	struct bio_list writes;
	struct bio_list failures;
	struct bio_list holds;	/* bios are waiting until suspend */

	struct dm_region_hash *rh;
	struct dm_kcopyd_client *kcopyd_client;
	struct dm_io_client *io_client;

	/* recovery */
	region_t nr_regions;
	int in_sync;
	int log_failure;
	int leg_failure;
	atomic_t suspend;

	atomic_t default_mirror;	/* Default mirror */

	struct workqueue_struct *kmirrord_wq;
	struct work_struct kmirrord_work;
	struct timer_list timer;
	unsigned long timer_pending;

	struct work_struct trigger_event;

	unsigned nr_mirrors;
	struct mirror mirror[0];
};
static void wakeup_mirrord(void *context)
{
	struct mirror_set *ms = context;

	queue_work(ms->kmirrord_wq, &ms->kmirrord_work);
}

static void delayed_wake_fn(unsigned long data)
{
	struct mirror_set *ms = (struct mirror_set *) data;

	clear_bit(0, &ms->timer_pending);
	wakeup_mirrord(ms);
}

static void delayed_wake(struct mirror_set *ms)
{
	if (test_and_set_bit(0, &ms->timer_pending))
		return;

	ms->timer.expires = jiffies + HZ / 5;
	ms->timer.data = (unsigned long) ms;
	ms->timer.function = delayed_wake_fn;
	add_timer(&ms->timer);
}

static void wakeup_all_recovery_waiters(void *context)
{
	wake_up_all(&_kmirrord_recovery_stopped);
}
static void queue_bio(struct mirror_set *ms, struct bio *bio, int rw)
{
	unsigned long flags;
	int should_wake = 0;
	struct bio_list *bl;

	bl = (rw == WRITE) ? &ms->writes : &ms->reads;
	spin_lock_irqsave(&ms->lock, flags);
	should_wake = !(bl->head);
	bio_list_add(bl, bio);
	spin_unlock_irqrestore(&ms->lock, flags);

	if (should_wake)
		wakeup_mirrord(ms);
}

static void dispatch_bios(void *context, struct bio_list *bio_list)
{
	struct mirror_set *ms = context;
	struct bio *bio;

	while ((bio = bio_list_pop(bio_list)))
		queue_bio(ms, bio, WRITE);
}

struct dm_raid1_read_record {
	struct mirror *m;
	struct dm_bio_details details;
};
/*
 * Every mirror should look like this one.
 */
#define DEFAULT_MIRROR 0

/*
 * This is yucky.  We squirrel the mirror struct away inside
 * bi_next for read/write buffers.  This is safe since the bh
 * doesn't get submitted to the lower levels of block layer.
 */
static struct mirror *bio_get_m(struct bio *bio)
{
	return (struct mirror *) bio->bi_next;
}

static void bio_set_m(struct bio *bio, struct mirror *m)
{
	bio->bi_next = (struct bio *) m;
}

static struct mirror *get_default_mirror(struct mirror_set *ms)
{
	return &ms->mirror[atomic_read(&ms->default_mirror)];
}

static void set_default_mirror(struct mirror *m)
{
	struct mirror_set *ms = m->ms;
	struct mirror *m0 = &(ms->mirror[0]);

	atomic_set(&ms->default_mirror, m - m0);
}

static struct mirror *get_valid_mirror(struct mirror_set *ms)
{
	struct mirror *m;

	for (m = ms->mirror; m < ms->mirror + ms->nr_mirrors; m++)
		if (!atomic_read(&m->error_count))
			return m;

	return NULL;
}
/* fail_mirror
 * @m: mirror device to fail
 * @error_type: one of the enum's, DM_RAID1_*_ERROR
 *
 * If errors are being handled, record the type of
 * error encountered for this device.  If this type
 * of error has already been recorded, we can return;
 * otherwise, we must signal userspace by triggering
 * an event.  Additionally, if the device is the
 * primary device, we must choose a new primary, but
 * only if the mirror is in-sync.
 *
 * This function must not block.
 */
static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type)
{
	struct mirror_set *ms = m->ms;
	struct mirror *new;

	ms->leg_failure = 1;

	/*
	 * error_count is used for nothing more than a
	 * simple way to tell if a device has encountered
	 * errors.
	 */
	atomic_inc(&m->error_count);

	if (test_and_set_bit(error_type, &m->error_type))
		return;

	if (!errors_handled(ms))
		return;

	if (m != get_default_mirror(ms))
		goto out;

	if (!ms->in_sync) {
		/*
		 * Better to issue requests to same failing device
		 * than to risk returning corrupt data.
		 */
		DMERR("Primary mirror (%s) failed while out-of-sync: "
		      "Reads may fail.", m->dev->name);
		goto out;
	}

	new = get_valid_mirror(ms);
	if (new)
		set_default_mirror(new);
	else
		DMWARN("All sides of mirror have failed.");

out:
	schedule_work(&ms->trigger_event);
}
static int mirror_flush(struct dm_target *ti)
{
	struct mirror_set *ms = ti->private;
	unsigned long error_bits;

	unsigned int i;
	struct dm_io_region io[ms->nr_mirrors];
	struct mirror *m;
	struct dm_io_request io_req = {
		.bi_rw = WRITE_FLUSH,
		.mem.type = DM_IO_KMEM,
		.mem.ptr.addr = NULL,
		.client = ms->io_client,
	};

	for (i = 0, m = ms->mirror; i < ms->nr_mirrors; i++, m++) {
		io[i].bdev = m->dev->bdev;
		io[i].sector = 0;
		io[i].count = 0;
	}

	error_bits = -1;
	dm_io(&io_req, ms->nr_mirrors, io, &error_bits);
	if (unlikely(error_bits != 0)) {
		for (i = 0; i < ms->nr_mirrors; i++)
			if (test_bit(i, &error_bits))
				fail_mirror(ms->mirror + i,
					    DM_RAID1_FLUSH_ERROR);
		return -EIO;
	}

	return 0;
}
/*-----------------------------------------------------------------
 * Recovery.
 *
 * When a mirror is first activated we may find that some regions
 * are in the no-sync state.  We have to recover these by
 * recopying from the default mirror to all the others.
 *---------------------------------------------------------------*/
static void recovery_complete(int read_err, unsigned long write_err,
			      void *context)
{
	struct dm_region *reg = context;
	struct mirror_set *ms = dm_rh_region_context(reg);
	int m, bit = 0;

	if (read_err) {
		/* Read error means the failure of default mirror. */
		DMERR_LIMIT("Unable to read primary mirror during recovery");
		fail_mirror(get_default_mirror(ms), DM_RAID1_SYNC_ERROR);
	}

	if (write_err) {
		DMERR_LIMIT("Write error during recovery (error = 0x%lx)",
			    write_err);
		/*
		 * Bits correspond to devices (excluding default mirror).
		 * The default mirror cannot change during recovery.
		 */
		for (m = 0; m < ms->nr_mirrors; m++) {
			if (&ms->mirror[m] == get_default_mirror(ms))
				continue;
			if (test_bit(bit, &write_err))
				fail_mirror(ms->mirror + m,
					    DM_RAID1_SYNC_ERROR);
			bit++;
		}
	}

	dm_rh_recovery_end(reg, !(read_err || write_err));
}
static int recover(struct mirror_set *ms, struct dm_region *reg)
{
	int r;
	unsigned i;
	struct dm_io_region from, to[DM_KCOPYD_MAX_REGIONS], *dest;
	struct mirror *m;
	unsigned long flags = 0;
	region_t key = dm_rh_get_region_key(reg);
	sector_t region_size = dm_rh_get_region_size(ms->rh);

	/* fill in the source */
	m = get_default_mirror(ms);
	from.bdev = m->dev->bdev;
	from.sector = m->offset + dm_rh_region_to_sector(ms->rh, key);
	if (key == (ms->nr_regions - 1)) {
		/*
		 * The final region may be smaller than
		 * region_size.
		 */
		from.count = ms->ti->len & (region_size - 1);
		if (!from.count)
			from.count = region_size;
	} else
		from.count = region_size;

	/* fill in the destinations */
	for (i = 0, dest = to; i < ms->nr_mirrors; i++) {
		if (&ms->mirror[i] == get_default_mirror(ms))
			continue;

		m = ms->mirror + i;
		dest->bdev = m->dev->bdev;
		dest->sector = m->offset + dm_rh_region_to_sector(ms->rh, key);
		dest->count = from.count;
		dest++;
	}

	/* hand to kcopyd */
	if (!errors_handled(ms))
		set_bit(DM_KCOPYD_IGNORE_ERROR, &flags);

	r = dm_kcopyd_copy(ms->kcopyd_client, &from, ms->nr_mirrors - 1, to,
			   flags, recovery_complete, reg);

	return r;
}
static void do_recovery(struct mirror_set *ms)
{
	struct dm_region *reg;
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
	int r;

	/*
	 * Start quiescing some regions.
	 */
	dm_rh_recovery_prepare(ms->rh);

	/*
	 * Copy any already quiesced regions.
	 */
	while ((reg = dm_rh_recovery_start(ms->rh))) {
		r = recover(ms, reg);
		if (r)
			dm_rh_recovery_end(reg, 0);
	}

	/*
	 * Update the in sync flag.
	 */
	if (!ms->in_sync &&
	    (log->type->get_sync_count(log) == ms->nr_regions)) {
		/* the sync is complete */
		dm_table_event(ms->ti->table);
		ms->in_sync = 1;
	}
}
/*-----------------------------------------------------------------
 * Reads
 *---------------------------------------------------------------*/
static struct mirror *choose_mirror(struct mirror_set *ms, sector_t sector)
{
	struct mirror *m = get_default_mirror(ms);

	do {
		if (likely(!atomic_read(&m->error_count)))
			return m;

		if (m-- == ms->mirror)
			m += ms->nr_mirrors;
	} while (m != get_default_mirror(ms));

	return NULL;
}

static int default_ok(struct mirror *m)
{
	struct mirror *default_mirror = get_default_mirror(m->ms);

	return !atomic_read(&default_mirror->error_count);
}

static int mirror_available(struct mirror_set *ms, struct bio *bio)
{
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
	region_t region = dm_rh_bio_to_region(ms->rh, bio);

	if (log->type->in_sync(log, region, 0))
		return choose_mirror(ms, bio->bi_sector) ? 1 : 0;

	return 0;
}
/*
 * remap a buffer to a particular mirror.
 */
static sector_t map_sector(struct mirror *m, struct bio *bio)
{
	if (unlikely(!bio->bi_size))
		return 0;
	return m->offset + dm_target_offset(m->ms->ti, bio->bi_sector);
}

static void map_bio(struct mirror *m, struct bio *bio)
{
	bio->bi_bdev = m->dev->bdev;
	bio->bi_sector = map_sector(m, bio);
}

static void map_region(struct dm_io_region *io, struct mirror *m,
		       struct bio *bio)
{
	io->bdev = m->dev->bdev;
	io->sector = map_sector(m, bio);
	io->count = bio->bi_size >> 9;
}
static void hold_bio(struct mirror_set *ms, struct bio *bio)
{
	/*
	 * Lock is required to avoid race condition during suspend
	 * process.
	 */
	spin_lock_irq(&ms->lock);

	if (atomic_read(&ms->suspend)) {
		spin_unlock_irq(&ms->lock);

		/*
		 * If device is suspended, complete the bio.
		 */
		if (dm_noflush_suspending(ms->ti))
			bio_endio(bio, DM_ENDIO_REQUEUE);
		else
			bio_endio(bio, -EIO);
		return;
	}

	/*
	 * Hold bio until the suspend is complete.
	 */
	bio_list_add(&ms->holds, bio);
	spin_unlock_irq(&ms->lock);
}
/*-----------------------------------------------------------------
 * Reads
 *---------------------------------------------------------------*/
static void read_callback(unsigned long error, void *context)
{
	struct bio *bio = context;
	struct mirror *m;

	m = bio_get_m(bio);
	bio_set_m(bio, NULL);

	if (likely(!error)) {
		bio_endio(bio, 0);
		return;
	}

	fail_mirror(m, DM_RAID1_READ_ERROR);

	if (likely(default_ok(m)) || mirror_available(m->ms, bio)) {
		DMWARN_LIMIT("Read failure on mirror device %s.  "
			     "Trying alternative device.",
			     m->dev->name);
		queue_bio(m->ms, bio, bio_rw(bio));
		return;
	}

	DMERR_LIMIT("Read failure on mirror device %s.  Failing I/O.",
		    m->dev->name);
	bio_endio(bio, -EIO);
}

/* Asynchronous read. */
static void read_async_bio(struct mirror *m, struct bio *bio)
{
	struct dm_io_region io;
	struct dm_io_request io_req = {
		.bi_rw = READ,
		.mem.type = DM_IO_BVEC,
		.mem.ptr.bvec = bio->bi_io_vec + bio->bi_idx,
		.notify.fn = read_callback,
		.notify.context = bio,
		.client = m->ms->io_client,
	};

	map_region(&io, m, bio);
	bio_set_m(bio, m);
	BUG_ON(dm_io(&io_req, 1, &io, NULL));
}
static inline int region_in_sync(struct mirror_set *ms, region_t region,
				 int may_block)
{
	int state = dm_rh_get_state(ms->rh, region, may_block);
	return state == DM_RH_CLEAN || state == DM_RH_DIRTY;
}

static void do_reads(struct mirror_set *ms, struct bio_list *reads)
{
	region_t region;
	struct bio *bio;
	struct mirror *m;

	while ((bio = bio_list_pop(reads))) {
		region = dm_rh_bio_to_region(ms->rh, bio);
		m = get_default_mirror(ms);

		/*
		 * We can only read balance if the region is in sync.
		 */
		if (likely(region_in_sync(ms, region, 1)))
			m = choose_mirror(ms, bio->bi_sector);
		else if (m && atomic_read(&m->error_count))
			m = NULL;

		if (likely(m))
			read_async_bio(m, bio);
		else
			bio_endio(bio, -EIO);
	}
}
/*-----------------------------------------------------------------
 * Writes.
 *
 * We do different things with the write io depending on the
 * state of the region that it's in:
 *
 * SYNC:	increment pending, use kcopyd to write to *all* mirrors
 * RECOVERING:	delay the io until recovery completes
 * NOSYNC:	increment pending, just write to the default mirror
 *---------------------------------------------------------------*/
static void write_callback(unsigned long error, void *context)
{
	unsigned i;
	struct bio *bio = (struct bio *) context;
	struct mirror_set *ms;
	int should_wake = 0;
	unsigned long flags;

	ms = bio_get_m(bio)->ms;
	bio_set_m(bio, NULL);

	/*
	 * NOTE: We don't decrement the pending count here,
	 * instead it is done by the targets endio function.
	 * This way we handle both writes to SYNC and NOSYNC
	 * regions with the same code.
	 */
	if (likely(!error)) {
		bio_endio(bio, 0);
		return;
	}

	for (i = 0; i < ms->nr_mirrors; i++)
		if (test_bit(i, &error))
			fail_mirror(ms->mirror + i, DM_RAID1_WRITE_ERROR);

	/*
	 * Need to raise event.  Since raising
	 * events can block, we need to do it in
	 * the main thread.
	 */
	spin_lock_irqsave(&ms->lock, flags);
	if (!ms->failures.head)
		should_wake = 1;
	bio_list_add(&ms->failures, bio);
	spin_unlock_irqrestore(&ms->lock, flags);
	if (should_wake)
		wakeup_mirrord(ms);
}

static void do_write(struct mirror_set *ms, struct bio *bio)
{
	unsigned int i;
	struct dm_io_region io[ms->nr_mirrors], *dest = io;
	struct mirror *m;
	struct dm_io_request io_req = {
		.bi_rw = WRITE | (bio->bi_rw & WRITE_FLUSH_FUA),
		.mem.type = DM_IO_BVEC,
		.mem.ptr.bvec = bio->bi_io_vec + bio->bi_idx,
		.notify.fn = write_callback,
		.notify.context = bio,
		.client = ms->io_client,
	};

	if (bio->bi_rw & REQ_DISCARD) {
		io_req.bi_rw |= REQ_DISCARD;
		io_req.mem.type = DM_IO_KMEM;
		io_req.mem.ptr.addr = NULL;
	}

	for (i = 0, m = ms->mirror; i < ms->nr_mirrors; i++, m++)
		map_region(dest++, m, bio);

	/*
	 * Use default mirror because we only need it to retrieve the reference
	 * to the mirror set in write_callback().
	 */
	bio_set_m(bio, get_default_mirror(ms));

	BUG_ON(dm_io(&io_req, ms->nr_mirrors, io, NULL));
}
static void do_writes(struct mirror_set *ms, struct bio_list *writes)
{
	int state;
	struct bio *bio;
	struct bio_list sync, nosync, recover, *this_list = NULL;
	struct bio_list requeue;
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
	region_t region;

	if (!writes->head)
		return;

	/*
	 * Classify each write.
	 */
	bio_list_init(&sync);
	bio_list_init(&nosync);
	bio_list_init(&recover);
	bio_list_init(&requeue);

	while ((bio = bio_list_pop(writes))) {
		if ((bio->bi_rw & REQ_FLUSH) ||
		    (bio->bi_rw & REQ_DISCARD)) {
			bio_list_add(&sync, bio);
			continue;
		}

		region = dm_rh_bio_to_region(ms->rh, bio);

		if (log->type->is_remote_recovering &&
		    log->type->is_remote_recovering(log, region)) {
			bio_list_add(&requeue, bio);
			continue;
		}

		state = dm_rh_get_state(ms->rh, region, 1);
		switch (state) {
		case DM_RH_CLEAN:
		case DM_RH_DIRTY:
			this_list = &sync;
			break;

		case DM_RH_NOSYNC:
			this_list = &nosync;
			break;

		case DM_RH_RECOVERING:
			this_list = &recover;
			break;
		}

		bio_list_add(this_list, bio);
	}

	/*
	 * Add bios that are delayed due to remote recovery
	 * back on to the write queue
	 */
	if (unlikely(requeue.head)) {
		spin_lock_irq(&ms->lock);
		bio_list_merge(&ms->writes, &requeue);
		spin_unlock_irq(&ms->lock);
		delayed_wake(ms);
	}

	/*
	 * Increment the pending counts for any regions that will
	 * be written to (writes to recover regions are going to
	 * be delayed).
	 */
	dm_rh_inc_pending(ms->rh, &sync);
	dm_rh_inc_pending(ms->rh, &nosync);

	/*
	 * If the flush fails on a previous call and succeeds here,
	 * we must not reset the log_failure variable.  We need
	 * userspace interaction to do that.
	 */
	ms->log_failure = dm_rh_flush(ms->rh) ? 1 : ms->log_failure;

	/*
	 * Dispatch io.
	 */
	if (unlikely(ms->log_failure) && errors_handled(ms)) {
		spin_lock_irq(&ms->lock);
		bio_list_merge(&ms->failures, &sync);
		spin_unlock_irq(&ms->lock);
		wakeup_mirrord(ms);
	} else
		while ((bio = bio_list_pop(&sync)))
			do_write(ms, bio);

	while ((bio = bio_list_pop(&recover)))
		dm_rh_delay(ms->rh, bio);

	while ((bio = bio_list_pop(&nosync))) {
		if (unlikely(ms->leg_failure) && errors_handled(ms)) {
			spin_lock_irq(&ms->lock);
			bio_list_add(&ms->failures, bio);
			spin_unlock_irq(&ms->lock);
			wakeup_mirrord(ms);
		} else {
			map_bio(get_default_mirror(ms), bio);
			generic_make_request(bio);
		}
	}
}
static void do_failures(struct mirror_set *ms, struct bio_list *failures)
{
	struct bio *bio;

	if (likely(!failures->head))
		return;

	/*
	 * If the log has failed, unattempted writes are being
	 * put on the holds list.  We can't issue those writes
	 * until a log has been marked, so we must store them.
	 *
	 * If a 'noflush' suspend is in progress, we can requeue
	 * the I/O's to the core.  This gives userspace a chance
	 * to reconfigure the mirror, at which point the core
	 * will reissue the writes.  If the 'noflush' flag is
	 * not set, we have no choice but to return errors.
	 *
	 * Some writes on the failures list may have been
	 * submitted before the log failure and represent a
	 * failure to write to one of the devices.  It is ok
	 * for us to treat them the same and requeue them
	 * as well.
	 */
	while ((bio = bio_list_pop(failures))) {
		if (!ms->log_failure) {
			ms->in_sync = 0;
			dm_rh_mark_nosync(ms->rh, bio);
		}

		/*
		 * If all the legs are dead, fail the I/O.
		 * If we have been told to handle errors, hold the bio
		 * and wait for userspace to deal with the problem.
		 * Otherwise pretend that the I/O succeeded. (This would
		 * be wrong if the failed leg returned after reboot and
		 * got replicated back to the good legs.)
		 */
		if (!get_valid_mirror(ms))
			bio_endio(bio, -EIO);
		else if (errors_handled(ms))
			hold_bio(ms, bio);
		else
			bio_endio(bio, 0);
	}
}

static void trigger_event(struct work_struct *work)
{
	struct mirror_set *ms =
		container_of(work, struct mirror_set, trigger_event);

	dm_table_event(ms->ti->table);
}
/*-----------------------------------------------------------------
 * kmirrord
 *---------------------------------------------------------------*/
static void do_mirror(struct work_struct *work)
{
	struct mirror_set *ms = container_of(work, struct mirror_set,
					     kmirrord_work);
	struct bio_list reads, writes, failures;
	unsigned long flags;

	spin_lock_irqsave(&ms->lock, flags);
	reads = ms->reads;
	writes = ms->writes;
	failures = ms->failures;
	bio_list_init(&ms->reads);
	bio_list_init(&ms->writes);
	bio_list_init(&ms->failures);
	spin_unlock_irqrestore(&ms->lock, flags);

	dm_rh_update_states(ms->rh, errors_handled(ms));

	do_recovery(ms);
	do_reads(ms, &reads);
	do_writes(ms, &writes);
	do_failures(ms, &failures);
}
/*-----------------------------------------------------------------
 * Target functions
 *---------------------------------------------------------------*/
static struct mirror_set *alloc_context(unsigned int nr_mirrors,
					uint32_t region_size,
					struct dm_target *ti,
					struct dm_dirty_log *dl)
{
	size_t len;
	struct mirror_set *ms = NULL;

	len = sizeof(*ms) + (sizeof(ms->mirror[0]) * nr_mirrors);

	ms = kzalloc(len, GFP_KERNEL);
	if (!ms) {
		ti->error = "Cannot allocate mirror context";
		return NULL;
	}

	spin_lock_init(&ms->lock);
	bio_list_init(&ms->reads);
	bio_list_init(&ms->writes);
	bio_list_init(&ms->failures);
	bio_list_init(&ms->holds);

	ms->ti = ti;
	ms->nr_mirrors = nr_mirrors;
	ms->nr_regions = dm_sector_div_up(ti->len, region_size);
	ms->in_sync = 0;
	ms->log_failure = 0;
	ms->leg_failure = 0;
	atomic_set(&ms->suspend, 0);
	atomic_set(&ms->default_mirror, DEFAULT_MIRROR);

	ms->io_client = dm_io_client_create();
	if (IS_ERR(ms->io_client)) {
		ti->error = "Error creating dm_io client";
		kfree(ms);
		return NULL;
	}

	ms->rh = dm_region_hash_create(ms, dispatch_bios, wakeup_mirrord,
				       wakeup_all_recovery_waiters,
				       ms->ti->begin, MAX_RECOVERY,
				       dl, region_size, ms->nr_regions);
	if (IS_ERR(ms->rh)) {
		ti->error = "Error creating dirty region hash";
		dm_io_client_destroy(ms->io_client);
		kfree(ms);
		return NULL;
	}

	return ms;
}

static void free_context(struct mirror_set *ms, struct dm_target *ti,
			 unsigned int m)
{
	while (m--)
		dm_put_device(ti, ms->mirror[m].dev);

	dm_io_client_destroy(ms->io_client);
	dm_region_hash_destroy(ms->rh);
	kfree(ms);
}
static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
		      unsigned int mirror, char **argv)
{
	unsigned long long offset;
	char dummy;

	if (sscanf(argv[1], "%llu%c", &offset, &dummy) != 1) {
		ti->error = "Invalid offset";
		return -EINVAL;
	}

	if (dm_get_device(ti, argv[0], dm_table_get_mode(ti->table),
			  &ms->mirror[mirror].dev)) {
		ti->error = "Device lookup failure";
		return -ENXIO;
	}

	ms->mirror[mirror].ms = ms;
	atomic_set(&(ms->mirror[mirror].error_count), 0);
	ms->mirror[mirror].error_type = 0;
	ms->mirror[mirror].offset = offset;

	return 0;
}

/*
 * Create dirty log: log_type #log_params <log_params>
 */
static struct dm_dirty_log *create_dirty_log(struct dm_target *ti,
					     unsigned argc, char **argv,
					     unsigned *args_used)
{
	unsigned param_count;
	struct dm_dirty_log *dl;
	char dummy;

	if (argc < 2) {
		ti->error = "Insufficient mirror log arguments";
		return NULL;
	}

	if (sscanf(argv[1], "%u%c", &param_count, &dummy) != 1) {
		ti->error = "Invalid mirror log argument count";
		return NULL;
	}

	*args_used = 2 + param_count;

	if (argc < *args_used) {
		ti->error = "Insufficient mirror log arguments";
		return NULL;
	}

	dl = dm_dirty_log_create(argv[0], ti, mirror_flush, param_count,
				 argv + 2);
	if (!dl) {
		ti->error = "Error creating mirror dirty log";
		return NULL;
	}

	return dl;
}
static int parse_features(struct mirror_set *ms, unsigned argc, char **argv,
			  unsigned *args_used)
{
	unsigned num_features;
	struct dm_target *ti = ms->ti;
	char dummy;

	*args_used = 0;

	if (!argc)
		return 0;

	if (sscanf(argv[0], "%u%c", &num_features, &dummy) != 1) {
		ti->error = "Invalid number of features";
		return -EINVAL;
	}

	argc--;
	argv++;
	(*args_used)++;

	if (num_features > argc) {
		ti->error = "Not enough arguments to support feature count";
		return -EINVAL;
	}

	if (!strcmp("handle_errors", argv[0]))
		ms->features |= DM_RAID1_HANDLE_ERRORS;
	else {
		ti->error = "Unrecognised feature requested";
		return -EINVAL;
	}

	(*args_used)++;

	return 0;
}
/*
 * Construct a mirror mapping:
 *
 * log_type #log_params <log_params>
 * #mirrors [mirror_path offset]{2,}
 * [#features <features>]
 *
 * log_type is "core" or "disk"
 * #log_params is between 1 and 3
 *
 * If present, features must be "handle_errors".
 * (An illustrative table line follows.)
 */
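/*
 * Illustrative example, not from the original source; the device names,
 * sizes and the "mirrored" map name are hypothetical.  A two-leg mirror
 * with an in-core log using 64-sector regions could be built with a
 * table line such as:
 *
 *	echo "0 2097152 mirror core 2 64 nosync 2 /dev/sdb1 0 /dev/sdc1 0" | \
 *		dmsetup create mirrored
 *
 * Appending "1 handle_errors" would set the DM_RAID1_HANDLE_ERRORS
 * feature consumed by parse_features() above.
 */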
static int mirror_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	int r;
	unsigned int nr_mirrors, m, args_used;
	struct mirror_set *ms;
	struct dm_dirty_log *dl;
	char dummy;

	dl = create_dirty_log(ti, argc, argv, &args_used);
	if (!dl)
		return -EINVAL;

	argv += args_used;
	argc -= args_used;

	if (!argc || sscanf(argv[0], "%u%c", &nr_mirrors, &dummy) != 1 ||
	    nr_mirrors < 2 || nr_mirrors > DM_KCOPYD_MAX_REGIONS + 1) {
		ti->error = "Invalid number of mirrors";
		dm_dirty_log_destroy(dl);
		return -EINVAL;
	}

	argv++, argc--;

	if (argc < nr_mirrors * 2) {
		ti->error = "Too few mirror arguments";
		dm_dirty_log_destroy(dl);
		return -EINVAL;
	}

	ms = alloc_context(nr_mirrors, dl->type->get_region_size(dl), ti, dl);
	if (!ms) {
		dm_dirty_log_destroy(dl);
		return -ENOMEM;
	}

	/* Get the mirror parameter sets */
	for (m = 0; m < nr_mirrors; m++) {
		r = get_mirror(ms, ti, m, argv);
		if (r) {
			free_context(ms, ti, m);
			return r;
		}
		argv += 2;
		argc -= 2;
	}

	ti->private = ms;

	r = dm_set_target_max_io_len(ti, dm_rh_get_region_size(ms->rh));
	if (r)
		goto err_free_context;

	ti->num_flush_requests = 1;
	ti->num_discard_requests = 1;
	ti->per_bio_data_size = sizeof(struct dm_raid1_read_record);
	ti->discard_zeroes_data_unsupported = true;

	ms->kmirrord_wq = alloc_workqueue("kmirrord",
					  WQ_NON_REENTRANT | WQ_MEM_RECLAIM, 0);
	if (!ms->kmirrord_wq) {
		DMERR("couldn't start kmirrord");
		r = -ENOMEM;
		goto err_free_context;
	}
	INIT_WORK(&ms->kmirrord_work, do_mirror);
	init_timer(&ms->timer);
	ms->timer_pending = 0;
	INIT_WORK(&ms->trigger_event, trigger_event);

	r = parse_features(ms, argc, argv, &args_used);
	if (r)
		goto err_destroy_wq;

	argv += args_used;
	argc -= args_used;

	/*
	 * Any read-balancing addition depends on the
	 * DM_RAID1_HANDLE_ERRORS flag being present.
	 * This is because the decision to balance depends
	 * on the sync state of a region.  If the above
	 * flag is not present, we ignore errors; and
	 * the sync state may be inaccurate.
	 */

	if (argc) {
		ti->error = "Too many mirror arguments";
		r = -EINVAL;
		goto err_destroy_wq;
	}

	ms->kcopyd_client = dm_kcopyd_client_create();
	if (IS_ERR(ms->kcopyd_client)) {
		r = PTR_ERR(ms->kcopyd_client);
		goto err_destroy_wq;
	}

	wakeup_mirrord(ms);
	return 0;

err_destroy_wq:
	destroy_workqueue(ms->kmirrord_wq);
err_free_context:
	free_context(ms, ti, ms->nr_mirrors);
	return r;
}
static void mirror_dtr(struct dm_target *ti)
{
	struct mirror_set *ms = (struct mirror_set *) ti->private;

	del_timer_sync(&ms->timer);
	flush_workqueue(ms->kmirrord_wq);
	flush_work(&ms->trigger_event);
	dm_kcopyd_client_destroy(ms->kcopyd_client);
	destroy_workqueue(ms->kmirrord_wq);
	free_context(ms, ti, ms->nr_mirrors);
}
/*
 * Mirror mapping function
 */
static int mirror_map(struct dm_target *ti, struct bio *bio,
		      union map_info *map_context)
{
	int r, rw = bio_rw(bio);
	struct mirror *m;
	struct mirror_set *ms = ti->private;
	struct dm_raid1_read_record *read_record;
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);

	if (rw == WRITE) {
		/* Save region for mirror_end_io() handler */
		map_context->ll = dm_rh_bio_to_region(ms->rh, bio);
		queue_bio(ms, bio, rw);
		return DM_MAPIO_SUBMITTED;
	}

	r = log->type->in_sync(log, dm_rh_bio_to_region(ms->rh, bio), 0);
	if (r < 0 && r != -EWOULDBLOCK)
		return r;

	/*
	 * If region is not in-sync queue the bio.
	 */
	if (!r || (r == -EWOULDBLOCK)) {
		if (rw == READA)
			return -EWOULDBLOCK;

		queue_bio(ms, bio, rw);
		return DM_MAPIO_SUBMITTED;
	}

	/*
	 * The region is in-sync and we can perform reads directly.
	 * Store enough information so we can retry if it fails.
	 */
	m = choose_mirror(ms, bio->bi_sector);
	if (unlikely(!m))
		return -EIO;

	read_record = dm_per_bio_data(bio, sizeof(struct dm_raid1_read_record));
	dm_bio_record(&read_record->details, bio);
	map_context->ptr = read_record;
	read_record->m = m;

	map_bio(m, bio);

	return DM_MAPIO_REMAPPED;
}
static int mirror_end_io(struct dm_target *ti, struct bio *bio,
			 int error, union map_info *map_context)
{
	int rw = bio_rw(bio);
	struct mirror_set *ms = (struct mirror_set *) ti->private;
	struct mirror *m = NULL;
	struct dm_bio_details *bd = NULL;
	struct dm_raid1_read_record *read_record = map_context->ptr;

	/*
	 * We need to dec pending if this was a write.
	 */
	if (rw == WRITE) {
		if (!(bio->bi_rw & (REQ_FLUSH | REQ_DISCARD)))
			dm_rh_dec(ms->rh, map_context->ll);
		return error;
	}

	if (error == -EOPNOTSUPP)
		goto out;

	if ((error == -EWOULDBLOCK) && (bio->bi_rw & REQ_RAHEAD))
		goto out;

	if (unlikely(error)) {
		if (!read_record) {
			/*
			 * There wasn't enough memory to record necessary
			 * information for a retry or there was no other
			 * mirror in-sync.
			 */
			DMERR_LIMIT("Mirror read failed.");
			return -EIO;
		}

		m = read_record->m;

		DMERR("Mirror read failed from %s. Trying alternative device.",
		      m->dev->name);

		fail_mirror(m, DM_RAID1_READ_ERROR);

		/*
		 * A failed read is requeued for another attempt using an intact
		 * mirror.
		 */
		if (default_ok(m) || mirror_available(ms, bio)) {
			bd = &read_record->details;

			dm_bio_restore(bd, bio);
			map_context->ptr = NULL;
			queue_bio(ms, bio, rw);
			return DM_ENDIO_INCOMPLETE;
		}
		DMERR("All replicated volumes dead, failing I/O");
	}

out:
	map_context->ptr = NULL;

	return error;
}
static void mirror_presuspend(struct dm_target *ti)
{
	struct mirror_set *ms = (struct mirror_set *) ti->private;
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);

	struct bio_list holds;
	struct bio *bio;

	atomic_set(&ms->suspend, 1);

	/*
	 * Process bios in the hold list so that any recovery waiting
	 * on them can proceed.  After this, no bio can be added to the
	 * hold list because ms->suspend is set.
	 */
	spin_lock_irq(&ms->lock);
	holds = ms->holds;
	bio_list_init(&ms->holds);
	spin_unlock_irq(&ms->lock);

	while ((bio = bio_list_pop(&holds)))
		hold_bio(ms, bio);

	/*
	 * We must finish up all the work that we've
	 * generated (i.e. recovery work).
	 */
	dm_rh_stop_recovery(ms->rh);

	wait_event(_kmirrord_recovery_stopped,
		   !dm_rh_recovery_in_flight(ms->rh));

	if (log->type->presuspend && log->type->presuspend(log))
		/* FIXME: need better error handling */
		DMWARN("log presuspend failed");

	/*
	 * Now that recovery is complete/stopped and the
	 * delayed bios are queued, we need to wait for
	 * the worker thread to complete. This way,
	 * we know that all of our I/O has been pushed.
	 */
	flush_workqueue(ms->kmirrord_wq);
}
static void mirror_postsuspend(struct dm_target *ti)
{
	struct mirror_set *ms = ti->private;
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);

	if (log->type->postsuspend && log->type->postsuspend(log))
		/* FIXME: need better error handling */
		DMWARN("log postsuspend failed");
}

static void mirror_resume(struct dm_target *ti)
{
	struct mirror_set *ms = ti->private;
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);

	atomic_set(&ms->suspend, 0);
	if (log->type->resume && log->type->resume(log))
		/* FIXME: need better error handling */
		DMWARN("log resume failed");
	dm_rh_start_recovery(ms->rh);
}
/*
 * device_status_char
 * @m: mirror device/leg we want the status of
 *
 * We return one character representing the most severe error
 * we have encountered.
 *
 * A => Alive - No failures
 * D => Dead - A write failure occurred leaving mirror out-of-sync
 * S => Sync - A synchronization failure occurred, mirror out-of-sync
 * R => Read - A read failure occurred, mirror data unaffected
 * F => Flush - A flush failure occurred
 * U => Unknown - an error was recorded but matches none of the above
 *
 * Returns: <char>
 */
static char device_status_char(struct mirror *m)
{
	if (!atomic_read(&(m->error_count)))
		return 'A';

	return (test_bit(DM_RAID1_FLUSH_ERROR, &(m->error_type))) ? 'F' :
		(test_bit(DM_RAID1_WRITE_ERROR, &(m->error_type))) ? 'D' :
		(test_bit(DM_RAID1_SYNC_ERROR, &(m->error_type))) ? 'S' :
		(test_bit(DM_RAID1_READ_ERROR, &(m->error_type))) ? 'R' : 'U';
}
static int mirror_status(struct dm_target *ti, status_type_t type,
			 unsigned status_flags, char *result, unsigned maxlen)
{
	unsigned int m, sz = 0;
	struct mirror_set *ms = (struct mirror_set *) ti->private;
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
	char buffer[ms->nr_mirrors + 1];

	switch (type) {
	case STATUSTYPE_INFO:
		DMEMIT("%d ", ms->nr_mirrors);
		for (m = 0; m < ms->nr_mirrors; m++) {
			DMEMIT("%s ", ms->mirror[m].dev->name);
			buffer[m] = device_status_char(&(ms->mirror[m]));
		}
		buffer[m] = '\0';

		DMEMIT("%llu/%llu 1 %s ",
		       (unsigned long long)log->type->get_sync_count(log),
		       (unsigned long long)ms->nr_regions, buffer);

		sz += log->type->status(log, type, result+sz, maxlen-sz);

		break;

	case STATUSTYPE_TABLE:
		sz = log->type->status(log, type, result, maxlen);

		DMEMIT("%d", ms->nr_mirrors);
		for (m = 0; m < ms->nr_mirrors; m++)
			DMEMIT(" %s %llu", ms->mirror[m].dev->name,
			       (unsigned long long)ms->mirror[m].offset);

		if (ms->features & DM_RAID1_HANDLE_ERRORS)
			DMEMIT(" 1 handle_errors");
	}

	return 0;
}
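/*
 * Illustrative example, not from the original source; the device numbers
 * are hypothetical.  For a healthy two-leg mirror the STATUSTYPE_INFO
 * output built above might read
 *
 *	2 253:4 253:5 125/256 1 AA
 *
 * followed by the dirty log's own status fields: the number of legs,
 * their devices, regions-in-sync/total regions, and one
 * device_status_char() per leg.
 */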
static int mirror_iterate_devices(struct dm_target *ti,
				  iterate_devices_callout_fn fn, void *data)
{
	struct mirror_set *ms = ti->private;
	int ret = 0;
	unsigned i;

	for (i = 0; !ret && i < ms->nr_mirrors; i++)
		ret = fn(ti, ms->mirror[i].dev,
			 ms->mirror[i].offset, ti->len, data);

	return ret;
}
static struct target_type mirror_target = {
	.name	 = "mirror",
	.version = {1, 13, 1},
	.module	 = THIS_MODULE,
	.ctr	 = mirror_ctr,
	.dtr	 = mirror_dtr,
	.map	 = mirror_map,
	.end_io	 = mirror_end_io,
	.presuspend = mirror_presuspend,
	.postsuspend = mirror_postsuspend,
	.resume	 = mirror_resume,
	.status	 = mirror_status,
	.iterate_devices = mirror_iterate_devices,
};
static int __init dm_mirror_init(void)
{
	int r;

	r = dm_register_target(&mirror_target);
	if (r < 0) {
		DMERR("Failed to register mirror target");
		return r;
	}

	return 0;
}

static void __exit dm_mirror_exit(void)
{
	dm_unregister_target(&mirror_target);
}

module_init(dm_mirror_init);
module_exit(dm_mirror_exit);

MODULE_DESCRIPTION(DM_NAME " mirror target");
MODULE_AUTHOR("Joe Thornber");
MODULE_LICENSE("GPL");