/*
 * md.c : Multiple Devices driver for Linux
 * Copyright (C) 1998, 1999, 2000 Ingo Molnar
 *
 * completely rewritten, based on the MD driver code from Marc Zyngier
 *
 * - RAID-1/RAID-5 extensions by Miguel de Icaza, Gadi Oxman, Ingo Molnar
 * - RAID-6 extensions by H. Peter Anvin <hpa@zytor.com>
 * - boot support for linear and striped mode by Harald Hoyer <HarryH@Royal.Net>
 * - kerneld support by Boris Tobotras <boris@xtalk.msk.su>
 * - kmod support by: Cyrus Durgin
 * - RAID0 bugfixes: Mark Anthony Lisher <markal@iname.com>
 * - Devfs support by Richard Gooch <rgooch@atnf.csiro.au>
 *
 * - lots of fixes and improvements to the RAID1/RAID5 and generic
 *   RAID code (such as request based resynchronization):
 *
 *   Neil Brown <neilb@cse.unsw.edu.au>.
 *
 * - persistent bitmap code
 *   Copyright (C) 2003-2004, Paul Clements, SteelEye Technology, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * You should have received a copy of the GNU General Public License
 * (for example /usr/src/linux/COPYING); if not, write to the Free
 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/linkage.h>
#include <linux/raid/md.h>
#include <linux/raid/bitmap.h>
#include <linux/sysctl.h>
#include <linux/buffer_head.h> /* for invalidate_bdev */
#include <linux/poll.h>
#include <linux/mutex.h>
#include <linux/ctype.h>
#include <linux/freezer.h>

#include <linux/init.h>

#include <linux/file.h>

#ifdef CONFIG_KMOD
#include <linux/kmod.h>
#endif

#include <asm/unaligned.h>
#define MAJOR_NR MD_MAJOR

/* 63 partitions with the alternate major number (mdp) */
#define MdpMinorShift 6

#define DEBUG 0
#define dprintk(x...) ((void)(DEBUG && printk(x)))
#ifndef MODULE
static void autostart_arrays(int part);
#endif

static LIST_HEAD(pers_list);
static DEFINE_SPINLOCK(pers_lock);

static void md_print_devices(void);

#define MD_BUG(x...) { printk("md: bug in file %s, line %d\n", __FILE__, __LINE__); md_print_devices(); }
/*
 * Current RAID-1,4,5 parallel reconstruction 'guaranteed speed limit'
 * is 1000 KB/sec, so the extra system load does not show up that much.
 * Increase it if you want to have more _guaranteed_ speed. Note that
 * the RAID driver will use the maximum available bandwidth if the IO
 * subsystem is idle. There is also an 'absolute maximum' reconstruction
 * speed limit - in case reconstruction slows down your system despite
 * idle IO detection.
 *
 * You can change it via /proc/sys/dev/raid/speed_limit_min and _max,
 * or via /sys/block/mdX/md/sync_speed_{min,max}.
 */
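
/*
 * For example (using the paths above; md0 is just an example array,
 * values are in KB/sec):
 *
 *	echo 50000  > /proc/sys/dev/raid/speed_limit_min
 *	echo 100000 > /sys/block/md0/md/sync_speed_max
 */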
static int sysctl_speed_limit_min = 1000;
static int sysctl_speed_limit_max = 200000;
static inline int speed_min(mddev_t *mddev)
{
	return mddev->sync_speed_min ?
		mddev->sync_speed_min : sysctl_speed_limit_min;
}

static inline int speed_max(mddev_t *mddev)
{
	return mddev->sync_speed_max ?
		mddev->sync_speed_max : sysctl_speed_limit_max;
}
static struct ctl_table_header *raid_table_header;

static ctl_table raid_table[] = {
	{
		.ctl_name	= DEV_RAID_SPEED_LIMIT_MIN,
		.procname	= "speed_limit_min",
		.data		= &sysctl_speed_limit_min,
		.maxlen		= sizeof(int),
		.mode		= S_IRUGO|S_IWUSR,
		.proc_handler	= &proc_dointvec,
	},
	{
		.ctl_name	= DEV_RAID_SPEED_LIMIT_MAX,
		.procname	= "speed_limit_max",
		.data		= &sysctl_speed_limit_max,
		.maxlen		= sizeof(int),
		.mode		= S_IRUGO|S_IWUSR,
		.proc_handler	= &proc_dointvec,
	},
	{ .ctl_name = 0 }
};

static ctl_table raid_dir_table[] = {
	{
		.ctl_name	= DEV_RAID,
		.procname	= "raid",
		.maxlen		= 0,
		.mode		= S_IRUGO|S_IXUGO,
		.child		= raid_table,
	},
	{ .ctl_name = 0 }
};

static ctl_table raid_root_table[] = {
	{
		.ctl_name	= CTL_DEV,
		.procname	= "dev",
		.maxlen		= 0,
		.mode		= 0555,
		.child		= raid_dir_table,
	},
	{ .ctl_name = 0 }
};
static struct block_device_operations md_fops;

static int start_readonly;
/*
 * We have a system wide 'event count' that is incremented
 * on any 'interesting' event, and readers of /proc/mdstat
 * can use 'poll' or 'select' to find out when the event
 * count increases.
 *
 * Events are:
 *  start array, stop array, error, add device, remove device,
 *  start build, activate spare
 */
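
/*
 * A minimal userspace consumer would look something like this sketch
 * (error handling omitted; assumes exceptional-event poll semantics on
 * /proc/mdstat):
 *
 *	int fd = open("/proc/mdstat", O_RDONLY);
 *	struct pollfd pfd = { .fd = fd, .events = POLLPRI };
 *	while (poll(&pfd, 1, -1) > 0) {
 *		lseek(fd, 0, SEEK_SET);
 *		... re-read /proc/mdstat to see what changed ...
 *	}
 */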
static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
static atomic_t md_event_count;
void md_new_event(mddev_t *mddev)
{
	atomic_inc(&md_event_count);
	wake_up(&md_event_waiters);
	sysfs_notify(&mddev->kobj, NULL, "sync_action");
}
EXPORT_SYMBOL_GPL(md_new_event);
/* Alternate version that can be called from interrupts
 * when calling sysfs_notify isn't needed.
 */
static void md_new_event_inintr(mddev_t *mddev)
{
	atomic_inc(&md_event_count);
	wake_up(&md_event_waiters);
}
/*
 * Enables iteration over all existing md arrays;
 * all_mddevs_lock protects this list.
 */
static LIST_HEAD(all_mddevs);
static DEFINE_SPINLOCK(all_mddevs_lock);
/*
 * Iterates through all used mddevs in the system.
 * We take care to grab the all_mddevs_lock whenever navigating
 * the list, and to always hold a refcount when unlocked.
 * Any code which breaks out of this loop while owning a reference
 * to the current mddev must mddev_put it.
 */
#define for_each_mddev(mddev,tmp)					\
									\
	for (({ spin_lock(&all_mddevs_lock);				\
		tmp = all_mddevs.next;					\
		mddev = NULL;});					\
	     ({ if (tmp != &all_mddevs)					\
			mddev_get(list_entry(tmp, mddev_t, all_mddevs));\
		spin_unlock(&all_mddevs_lock);				\
		if (mddev) mddev_put(mddev);				\
		mddev = list_entry(tmp, mddev_t, all_mddevs);		\
		tmp != &all_mddevs;});					\
	     ({ spin_lock(&all_mddevs_lock);				\
		tmp = tmp->next;})					\
		)
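
/*
 * Typical use (a sketch; md_print_devices() below is a real caller):
 *
 *	mddev_t *mddev;
 *	struct list_head *tmp;
 *
 *	for_each_mddev(mddev, tmp) {
 *		...	(code that breaks out of the loop still holds
 *			 a reference and must call mddev_put(mddev))
 *	}
 */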
static int md_fail_request(struct request_queue *q, struct bio *bio)
{
	bio_io_error(bio);
	return 0;
}

static inline mddev_t *mddev_get(mddev_t *mddev)
{
	atomic_inc(&mddev->active);
	return mddev;
}

static void mddev_put(mddev_t *mddev)
{
	if (!atomic_dec_and_lock(&mddev->active, &all_mddevs_lock))
		return;
	if (!mddev->raid_disks && list_empty(&mddev->disks)) {
		list_del(&mddev->all_mddevs);
		spin_unlock(&all_mddevs_lock);
		blk_cleanup_queue(mddev->queue);
		kobject_put(&mddev->kobj);
	} else
		spin_unlock(&all_mddevs_lock);
}
static mddev_t * mddev_find(dev_t unit)
{
	mddev_t *mddev, *new = NULL;

 retry:
	spin_lock(&all_mddevs_lock);
	list_for_each_entry(mddev, &all_mddevs, all_mddevs)
		if (mddev->unit == unit) {
			mddev_get(mddev);
			spin_unlock(&all_mddevs_lock);
			kfree(new);
			return mddev;
		}

	if (new) {
		list_add(&new->all_mddevs, &all_mddevs);
		spin_unlock(&all_mddevs_lock);
		return new;
	}
	spin_unlock(&all_mddevs_lock);

	new = kzalloc(sizeof(*new), GFP_KERNEL);
	if (!new)
		return NULL;

	new->unit = unit;
	if (MAJOR(unit) == MD_MAJOR)
		new->md_minor = MINOR(unit);
	else
		new->md_minor = MINOR(unit) >> MdpMinorShift;

	mutex_init(&new->reconfig_mutex);
	INIT_LIST_HEAD(&new->disks);
	INIT_LIST_HEAD(&new->all_mddevs);
	init_timer(&new->safemode_timer);
	atomic_set(&new->active, 1);
	spin_lock_init(&new->write_lock);
	init_waitqueue_head(&new->sb_wait);
	new->reshape_position = MaxSector;
	new->resync_max = MaxSector;

	new->queue = blk_alloc_queue(GFP_KERNEL);
	if (!new->queue) {
		kfree(new);
		return NULL;
	}
	/* Can be unlocked because the queue is new: no concurrency */
	queue_flag_set_unlocked(QUEUE_FLAG_CLUSTER, new->queue);

	blk_queue_make_request(new->queue, md_fail_request);

	goto retry;
}
static inline int mddev_lock(mddev_t * mddev)
{
	return mutex_lock_interruptible(&mddev->reconfig_mutex);
}

static inline int mddev_trylock(mddev_t * mddev)
{
	return mutex_trylock(&mddev->reconfig_mutex);
}

static inline void mddev_unlock(mddev_t * mddev)
{
	mutex_unlock(&mddev->reconfig_mutex);

	md_wakeup_thread(mddev->thread);
}
static mdk_rdev_t * find_rdev_nr(mddev_t *mddev, int nr)
{
	mdk_rdev_t *rdev;
	struct list_head *tmp;

	rdev_for_each(rdev, tmp, mddev) {
		if (rdev->desc_nr == nr)
			return rdev;
	}
	return NULL;
}

static mdk_rdev_t * find_rdev(mddev_t * mddev, dev_t dev)
{
	struct list_head *tmp;
	mdk_rdev_t *rdev;

	rdev_for_each(rdev, tmp, mddev) {
		if (rdev->bdev->bd_dev == dev)
			return rdev;
	}
	return NULL;
}

static struct mdk_personality *find_pers(int level, char *clevel)
{
	struct mdk_personality *pers;
	list_for_each_entry(pers, &pers_list, list) {
		if (level != LEVEL_NONE && pers->level == level)
			return pers;
		if (strcmp(pers->name, clevel)==0)
			return pers;
	}
	return NULL;
}
static inline sector_t calc_dev_sboffset(struct block_device *bdev)
{
	sector_t size = bdev->bd_inode->i_size >> BLOCK_SIZE_BITS;
	return MD_NEW_SIZE_BLOCKS(size);
}

static sector_t calc_dev_size(mdk_rdev_t *rdev, unsigned chunk_size)
{
	sector_t size;

	size = rdev->sb_offset;

	if (chunk_size)
		size &= ~((sector_t)chunk_size/1024 - 1);
	return size;
}

static int alloc_disk_sb(mdk_rdev_t * rdev)
{
	if (rdev->sb_page)
		MD_BUG();

	rdev->sb_page = alloc_page(GFP_KERNEL);
	if (!rdev->sb_page) {
		printk(KERN_ALERT "md: out of memory.\n");
		return -ENOMEM;
	}

	return 0;
}

static void free_disk_sb(mdk_rdev_t * rdev)
{
	if (rdev->sb_page) {
		put_page(rdev->sb_page);
		rdev->sb_loaded = 0;
		rdev->sb_page = NULL;
		rdev->sb_offset = 0;
		rdev->size = 0;
	}
}
static void super_written(struct bio *bio, int error)
{
	mdk_rdev_t *rdev = bio->bi_private;
	mddev_t *mddev = rdev->mddev;

	if (error || !test_bit(BIO_UPTODATE, &bio->bi_flags)) {
		printk("md: super_written gets error=%d, uptodate=%d\n",
		       error, test_bit(BIO_UPTODATE, &bio->bi_flags));
		WARN_ON(test_bit(BIO_UPTODATE, &bio->bi_flags));
		md_error(mddev, rdev);
	}

	if (atomic_dec_and_test(&mddev->pending_writes))
		wake_up(&mddev->sb_wait);
	bio_put(bio);
}

static void super_written_barrier(struct bio *bio, int error)
{
	struct bio *bio2 = bio->bi_private;
	mdk_rdev_t *rdev = bio2->bi_private;
	mddev_t *mddev = rdev->mddev;

	if (!test_bit(BIO_UPTODATE, &bio->bi_flags) &&
	    error == -EOPNOTSUPP) {
		unsigned long flags;
		/* barriers don't appear to be supported :-( */
		set_bit(BarriersNotsupp, &rdev->flags);
		mddev->barriers_work = 0;
		spin_lock_irqsave(&mddev->write_lock, flags);
		bio2->bi_next = mddev->biolist;
		mddev->biolist = bio2;
		spin_unlock_irqrestore(&mddev->write_lock, flags);
		wake_up(&mddev->sb_wait);
		bio_put(bio);
	} else {
		bio_put(bio2);
		bio->bi_private = rdev;
		super_written(bio, error);
	}
}
void md_super_write(mddev_t *mddev, mdk_rdev_t *rdev,
		   sector_t sector, int size, struct page *page)
{
	/* write first size bytes of page to sector of rdev
	 * Increment mddev->pending_writes before returning
	 * and decrement it on completion, waking up sb_wait
	 * if zero is reached.
	 * If an error occurred, call md_error
	 *
	 * As we might need to resubmit the request if BIO_RW_BARRIER
	 * causes ENOTSUPP, we allocate a spare bio...
	 */
	struct bio *bio = bio_alloc(GFP_NOIO, 1);
	int rw = (1<<BIO_RW) | (1<<BIO_RW_SYNC);

	bio->bi_bdev = rdev->bdev;
	bio->bi_sector = sector;
	bio_add_page(bio, page, size, 0);
	bio->bi_private = rdev;
	bio->bi_end_io = super_written;
	bio->bi_rw = rw;

	atomic_inc(&mddev->pending_writes);
	if (!test_bit(BarriersNotsupp, &rdev->flags)) {
		struct bio *rbio;
		rw |= (1<<BIO_RW_BARRIER);
		rbio = bio_clone(bio, GFP_NOIO);
		rbio->bi_private = bio;
		rbio->bi_end_io = super_written_barrier;
		submit_bio(rw, rbio);
	} else
		submit_bio(rw, bio);
}
void md_super_wait(mddev_t *mddev)
{
	/* wait for all superblock writes that were scheduled to complete.
	 * if any had to be retried (due to BARRIER problems), retry them
	 */
	DEFINE_WAIT(wq);
	for(;;) {
		prepare_to_wait(&mddev->sb_wait, &wq, TASK_UNINTERRUPTIBLE);
		if (atomic_read(&mddev->pending_writes)==0)
			break;
		while (mddev->biolist) {
			struct bio *bio;
			spin_lock_irq(&mddev->write_lock);
			bio = mddev->biolist;
			mddev->biolist = bio->bi_next ;
			bio->bi_next = NULL;
			spin_unlock_irq(&mddev->write_lock);
			submit_bio(bio->bi_rw, bio);
		}
		schedule();
	}
	finish_wait(&mddev->sb_wait, &wq);
}
static void bi_complete(struct bio *bio, int error)
{
	complete((struct completion*)bio->bi_private);
}

int sync_page_io(struct block_device *bdev, sector_t sector, int size,
		   struct page *page, int rw)
{
	struct bio *bio = bio_alloc(GFP_NOIO, 1);
	struct completion event;
	int ret;

	rw |= (1 << BIO_RW_SYNC);

	bio->bi_bdev = bdev;
	bio->bi_sector = sector;
	bio_add_page(bio, page, size, 0);
	init_completion(&event);
	bio->bi_private = &event;
	bio->bi_end_io = bi_complete;
	submit_bio(rw, bio);
	wait_for_completion(&event);

	ret = test_bit(BIO_UPTODATE, &bio->bi_flags);
	bio_put(bio);
	return ret;
}
EXPORT_SYMBOL_GPL(sync_page_io);
static int read_disk_sb(mdk_rdev_t * rdev, int size)
{
	char b[BDEVNAME_SIZE];
	if (!rdev->sb_page) {
		MD_BUG();
		return -EINVAL;
	}
	if (rdev->sb_loaded)
		return 0;

	if (!sync_page_io(rdev->bdev, rdev->sb_offset<<1, size, rdev->sb_page, READ))
		goto fail;
	rdev->sb_loaded = 1;
	return 0;

fail:
	printk(KERN_WARNING "md: disabled device %s, could not read superblock.\n",
		bdevname(rdev->bdev,b));
	return -EINVAL;
}
static int uuid_equal(mdp_super_t *sb1, mdp_super_t *sb2)
{
	if (	(sb1->set_uuid0 == sb2->set_uuid0) &&
		(sb1->set_uuid1 == sb2->set_uuid1) &&
		(sb1->set_uuid2 == sb2->set_uuid2) &&
		(sb1->set_uuid3 == sb2->set_uuid3))
		return 1;
	return 0;
}

static int sb_equal(mdp_super_t *sb1, mdp_super_t *sb2)
{
	int ret;
	mdp_super_t *tmp1, *tmp2;

	tmp1 = kmalloc(sizeof(*tmp1),GFP_KERNEL);
	tmp2 = kmalloc(sizeof(*tmp2),GFP_KERNEL);

	if (!tmp1 || !tmp2) {
		ret = 0;
		printk(KERN_INFO "md.c: sb1 is not equal to sb2!\n");
		goto abort;
	}

	*tmp1 = *sb1;
	*tmp2 = *sb2;

	/*
	 * nr_disks is not constant
	 */
	tmp1->nr_disks = 0;
	tmp2->nr_disks = 0;

	if (memcmp(tmp1, tmp2, MD_SB_GENERIC_CONSTANT_WORDS * 4))
		ret = 0;
	else
		ret = 1;

abort:
	kfree(tmp1);
	kfree(tmp2);
	return ret;
}
static u32 md_csum_fold(u32 csum)
{
	csum = (csum & 0xffff) + (csum >> 16);
	return (csum & 0xffff) + (csum >> 16);
}
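
/*
 * A worked example of the fold: md_csum_fold(0x12345678)
 * = 0x5678 + 0x1234 = 0x68ac after the first pass, and the second
 * pass is a no-op here, so the result fits in 16 bits.
 */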
static unsigned int calc_sb_csum(mdp_super_t * sb)
{
	u64 newcsum = 0;
	u32 *sb32 = (u32*)sb;
	int i;
	unsigned int disk_csum, csum;

	disk_csum = sb->sb_csum;
	sb->sb_csum = 0;

	for (i = 0; i < MD_SB_BYTES/4 ; i++)
		newcsum += sb32[i];
	csum = (newcsum & 0xffffffff) + (newcsum>>32);

#ifdef CONFIG_ALPHA
	/* This used to use csum_partial, which was wrong for several
	 * reasons including that different results are returned on
	 * different architectures.  It isn't critical that we get exactly
	 * the same return value as before (we always csum_fold before
	 * testing, and that removes any differences).  However as we
	 * know that csum_partial always returned a 16bit value on
	 * alphas, do a fold to maximise conformity to previous behaviour.
	 */
	sb->sb_csum = md_csum_fold(disk_csum);
#else
	sb->sb_csum = disk_csum;
#endif
	return csum;
}
/*
 * Handle superblock details.
 * We want to be able to handle multiple superblock formats
 * so we have a common interface to them all, and an array of
 * different handlers.
 * We rely on user-space to write the initial superblock, and support
 * reading and updating of superblocks.
 * Interface methods are:
 *   int load_super(mdk_rdev_t *dev, mdk_rdev_t *refdev, int minor_version)
 *      loads and validates a superblock on dev.
 *      if refdev != NULL, compare superblocks on both devices
 *    Return:
 *      0 - dev has a superblock that is compatible with refdev
 *      1 - dev has a superblock that is compatible and newer than refdev
 *          so dev should be used as the refdev in future
 *     -EINVAL superblock incompatible or invalid
 *     -othererror e.g. -EIO
 *
 *   int validate_super(mddev_t *mddev, mdk_rdev_t *dev)
 *      Verify that dev is acceptable into mddev.
 *      The first time, mddev->raid_disks will be 0, and data from
 *      dev should be merged in.  Subsequent calls check that dev
 *      is new enough.  Return 0 or -EINVAL
 *
 *   void sync_super(mddev_t *mddev, mdk_rdev_t *dev)
 *      Update the superblock for rdev with data in mddev
 *      This does not write to disc.
 *
 */

struct super_type  {
	char		*name;
	struct module	*owner;
	int		(*load_super)(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version);
	int		(*validate_super)(mddev_t *mddev, mdk_rdev_t *rdev);
	void		(*sync_super)(mddev_t *mddev, mdk_rdev_t *rdev);
};
/*
 * load_super for 0.90.0
 */
static int super_90_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
{
	char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
	mdp_super_t *sb;
	int ret;
	sector_t sb_offset;

	/*
	 * Calculate the position of the superblock,
	 * it's at the end of the disk.
	 *
	 * It also happens to be a multiple of 4Kb.
	 */
	sb_offset = calc_dev_sboffset(rdev->bdev);
	rdev->sb_offset = sb_offset;

	ret = read_disk_sb(rdev, MD_SB_BYTES);
	if (ret) return ret;

	ret = -EINVAL;

	bdevname(rdev->bdev, b);
	sb = (mdp_super_t*)page_address(rdev->sb_page);

	if (sb->md_magic != MD_SB_MAGIC) {
		printk(KERN_ERR "md: invalid raid superblock magic on %s\n",
		       b);
		goto abort;
	}

	if (sb->major_version != 0 ||
	    sb->minor_version < 90 ||
	    sb->minor_version > 91) {
		printk(KERN_WARNING "Bad version number %d.%d on %s\n",
			sb->major_version, sb->minor_version,
			b);
		goto abort;
	}

	if (sb->raid_disks <= 0)
		goto abort;

	if (md_csum_fold(calc_sb_csum(sb)) != md_csum_fold(sb->sb_csum)) {
		printk(KERN_WARNING "md: invalid superblock checksum on %s\n",
			b);
		goto abort;
	}

	rdev->preferred_minor = sb->md_minor;
	rdev->data_offset = 0;
	rdev->sb_size = MD_SB_BYTES;

	if (sb->state & (1<<MD_SB_BITMAP_PRESENT)) {
		if (sb->level != 1 && sb->level != 4
		    && sb->level != 5 && sb->level != 6
		    && sb->level != 10) {
			/* FIXME use a better test */
			printk(KERN_WARNING
			       "md: bitmaps not supported for this level.\n");
			goto abort;
		}
	}

	if (sb->level == LEVEL_MULTIPATH)
		rdev->desc_nr = -1;
	else
		rdev->desc_nr = sb->this_disk.number;

	if (refdev == 0)
		ret = 1;
	else {
		__u64 ev1, ev2;
		mdp_super_t *refsb = (mdp_super_t*)page_address(refdev->sb_page);
		if (!uuid_equal(refsb, sb)) {
			printk(KERN_WARNING "md: %s has different UUID to %s\n",
				b, bdevname(refdev->bdev,b2));
			goto abort;
		}
		if (!sb_equal(refsb, sb)) {
			printk(KERN_WARNING "md: %s has same UUID"
			       " but different superblock to %s\n",
			       b, bdevname(refdev->bdev, b2));
			goto abort;
		}
		ev1 = md_event(sb);
		ev2 = md_event(refsb);
		if (ev1 > ev2)
			ret = 1;
		else
			ret = 0;
	}

	rdev->size = calc_dev_size(rdev, sb->chunk_size);

	if (rdev->size < sb->size && sb->level > 1)
		/* "this cannot possibly happen" ... */
		ret = -EINVAL;

 abort:
	return ret;
}
/*
 * validate_super for 0.90.0
 */
static int super_90_validate(mddev_t *mddev, mdk_rdev_t *rdev)
{
	mdp_disk_t *desc;
	mdp_super_t *sb = (mdp_super_t *)page_address(rdev->sb_page);
	__u64 ev1 = md_event(sb);

	rdev->raid_disk = -1;
	clear_bit(Faulty, &rdev->flags);
	clear_bit(In_sync, &rdev->flags);
	clear_bit(WriteMostly, &rdev->flags);
	clear_bit(BarriersNotsupp, &rdev->flags);

	if (mddev->raid_disks == 0) {
		mddev->major_version = 0;
		mddev->minor_version = sb->minor_version;
		mddev->patch_version = sb->patch_version;
		mddev->external = 0;
		mddev->chunk_size = sb->chunk_size;
		mddev->ctime = sb->ctime;
		mddev->utime = sb->utime;
		mddev->level = sb->level;
		mddev->clevel[0] = 0;
		mddev->layout = sb->layout;
		mddev->raid_disks = sb->raid_disks;
		mddev->size = sb->size;
		mddev->events = ev1;
		mddev->bitmap_offset = 0;
		mddev->default_bitmap_offset = MD_SB_BYTES >> 9;

		if (mddev->minor_version >= 91) {
			mddev->reshape_position = sb->reshape_position;
			mddev->delta_disks = sb->delta_disks;
			mddev->new_level = sb->new_level;
			mddev->new_layout = sb->new_layout;
			mddev->new_chunk = sb->new_chunk;
		} else {
			mddev->reshape_position = MaxSector;
			mddev->delta_disks = 0;
			mddev->new_level = mddev->level;
			mddev->new_layout = mddev->layout;
			mddev->new_chunk = mddev->chunk_size;
		}

		if (sb->state & (1<<MD_SB_CLEAN))
			mddev->recovery_cp = MaxSector;
		else {
			if (sb->events_hi == sb->cp_events_hi &&
			    sb->events_lo == sb->cp_events_lo) {
				mddev->recovery_cp = sb->recovery_cp;
			} else
				mddev->recovery_cp = 0;
		}

		memcpy(mddev->uuid+0, &sb->set_uuid0, 4);
		memcpy(mddev->uuid+4, &sb->set_uuid1, 4);
		memcpy(mddev->uuid+8, &sb->set_uuid2, 4);
		memcpy(mddev->uuid+12,&sb->set_uuid3, 4);

		mddev->max_disks = MD_SB_DISKS;

		if (sb->state & (1<<MD_SB_BITMAP_PRESENT) &&
		    mddev->bitmap_file == NULL)
			mddev->bitmap_offset = mddev->default_bitmap_offset;

	} else if (mddev->pers == NULL) {
		/* Insist on a good event counter while assembling */
		++ev1;
		if (ev1 < mddev->events)
			return -EINVAL;
	} else if (mddev->bitmap) {
		/* if adding to array with a bitmap, then we can accept an
		 * older device ... but not too old.
		 */
		if (ev1 < mddev->bitmap->events_cleared)
			return 0;
	} else {
		if (ev1 < mddev->events)
			/* just a hot-add of a new device, leave raid_disk at -1 */
			return 0;
	}

	if (mddev->level != LEVEL_MULTIPATH) {
		desc = sb->disks + rdev->desc_nr;

		if (desc->state & (1<<MD_DISK_FAULTY))
			set_bit(Faulty, &rdev->flags);
		else if (desc->state & (1<<MD_DISK_SYNC) /* &&
			    desc->raid_disk < mddev->raid_disks */) {
			set_bit(In_sync, &rdev->flags);
			rdev->raid_disk = desc->raid_disk;
		}
		if (desc->state & (1<<MD_DISK_WRITEMOSTLY))
			set_bit(WriteMostly, &rdev->flags);
	} else /* MULTIPATH are always insync */
		set_bit(In_sync, &rdev->flags);

	return 0;
}
/*
 * sync_super for 0.90.0
 */
static void super_90_sync(mddev_t *mddev, mdk_rdev_t *rdev)
{
	mdp_super_t *sb;
	struct list_head *tmp;
	mdk_rdev_t *rdev2;
	int next_spare = mddev->raid_disks;

	/* make rdev->sb match mddev data..
	 *
	 * 1/ zero out disks
	 * 2/ Add info for each disk, keeping track of highest desc_nr (next_spare);
	 * 3/ any empty disks < next_spare become removed
	 *
	 * disks[0] gets initialised to REMOVED because
	 * we cannot be sure from other fields if it has
	 * been initialised or not.
	 */
	int i;
	int active=0, working=0,failed=0,spare=0,nr_disks=0;

	rdev->sb_size = MD_SB_BYTES;

	sb = (mdp_super_t*)page_address(rdev->sb_page);

	memset(sb, 0, sizeof(*sb));

	sb->md_magic = MD_SB_MAGIC;
	sb->major_version = mddev->major_version;
	sb->patch_version = mddev->patch_version;
	sb->gvalid_words  = 0; /* ignored */
	memcpy(&sb->set_uuid0, mddev->uuid+0, 4);
	memcpy(&sb->set_uuid1, mddev->uuid+4, 4);
	memcpy(&sb->set_uuid2, mddev->uuid+8, 4);
	memcpy(&sb->set_uuid3, mddev->uuid+12,4);

	sb->ctime = mddev->ctime;
	sb->level = mddev->level;
	sb->size  = mddev->size;
	sb->raid_disks = mddev->raid_disks;
	sb->md_minor = mddev->md_minor;
	sb->not_persistent = 0;
	sb->utime = mddev->utime;
	sb->state = 0;
	sb->events_hi = (mddev->events>>32);
	sb->events_lo = (u32)mddev->events;

	if (mddev->reshape_position == MaxSector)
		sb->minor_version = 90;
	else {
		sb->minor_version = 91;
		sb->reshape_position = mddev->reshape_position;
		sb->new_level = mddev->new_level;
		sb->delta_disks = mddev->delta_disks;
		sb->new_layout = mddev->new_layout;
		sb->new_chunk = mddev->new_chunk;
	}
	mddev->minor_version = sb->minor_version;
	if (mddev->in_sync)
	{
		sb->recovery_cp = mddev->recovery_cp;
		sb->cp_events_hi = (mddev->events>>32);
		sb->cp_events_lo = (u32)mddev->events;
		if (mddev->recovery_cp == MaxSector)
			sb->state = (1<< MD_SB_CLEAN);
	} else
		sb->recovery_cp = 0;

	sb->layout = mddev->layout;
	sb->chunk_size = mddev->chunk_size;

	if (mddev->bitmap && mddev->bitmap_file == NULL)
		sb->state |= (1<<MD_SB_BITMAP_PRESENT);

	sb->disks[0].state = (1<<MD_DISK_REMOVED);
	rdev_for_each(rdev2, tmp, mddev) {
		mdp_disk_t *d;
		int desc_nr;
		if (rdev2->raid_disk >= 0 && test_bit(In_sync, &rdev2->flags)
		    && !test_bit(Faulty, &rdev2->flags))
			desc_nr = rdev2->raid_disk;
		else
			desc_nr = next_spare++;
		rdev2->desc_nr = desc_nr;
		d = &sb->disks[rdev2->desc_nr];
		nr_disks++;
		d->number = rdev2->desc_nr;
		d->major = MAJOR(rdev2->bdev->bd_dev);
		d->minor = MINOR(rdev2->bdev->bd_dev);
		if (rdev2->raid_disk >= 0 && test_bit(In_sync, &rdev2->flags)
		    && !test_bit(Faulty, &rdev2->flags))
			d->raid_disk = rdev2->raid_disk;
		else
			d->raid_disk = rdev2->desc_nr; /* compatibility */
		if (test_bit(Faulty, &rdev2->flags))
			d->state = (1<<MD_DISK_FAULTY);
		else if (test_bit(In_sync, &rdev2->flags)) {
			d->state = (1<<MD_DISK_ACTIVE);
			d->state |= (1<<MD_DISK_SYNC);
			active++;
			working++;
		} else {
			d->state = 0;
			spare++;
			working++;
		}
		if (test_bit(WriteMostly, &rdev2->flags))
			d->state |= (1<<MD_DISK_WRITEMOSTLY);
	}
	/* now set the "removed" and "faulty" bits on any missing devices */
	for (i=0 ; i < mddev->raid_disks ; i++) {
		mdp_disk_t *d = &sb->disks[i];
		if (d->state == 0 && d->number == 0) {
			d->number = i;
			d->raid_disk = i;
			d->state = (1<<MD_DISK_REMOVED);
			d->state |= (1<<MD_DISK_FAULTY);
			failed++;
		}
	}
	sb->nr_disks = nr_disks;
	sb->active_disks = active;
	sb->working_disks = working;
	sb->failed_disks = failed;
	sb->spare_disks = spare;

	sb->this_disk = sb->disks[rdev->desc_nr];
	sb->sb_csum = calc_sb_csum(sb);
}
/*
 * version 1 superblock
 */

static __le32 calc_sb_1_csum(struct mdp_superblock_1 * sb)
{
	__le32 disk_csum;
	u32 csum;
	unsigned long long newcsum;
	int size = 256 + le32_to_cpu(sb->max_dev)*2;
	__le32 *isuper = (__le32*)sb;
	int i;

	disk_csum = sb->sb_csum;
	sb->sb_csum = 0;
	newcsum = 0;
	for (i=0; size>=4; size -= 4 )
		newcsum += le32_to_cpu(*isuper++);

	if (size == 2)
		newcsum += le16_to_cpu(*(__le16*) isuper);

	csum = (newcsum & 0xffffffff) + (newcsum >> 32);
	sb->sb_csum = disk_csum;
	return cpu_to_le32(csum);
}
static int super_1_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
{
	struct mdp_superblock_1 *sb;
	int ret;
	sector_t sb_offset;
	char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
	int bmask;

	/*
	 * Calculate the position of the superblock.
	 * It is always aligned to a 4K boundary and
	 * depending on minor_version, it can be:
	 * 0: At least 8K, but less than 12K, from end of device
	 * 1: At start of device
	 * 2: 4K from start of device.
	 */
	switch(minor_version) {
	case 0:
		sb_offset = rdev->bdev->bd_inode->i_size >> 9;
		sb_offset -= 8*2;
		sb_offset &= ~(sector_t)(4*2-1);
		/* convert from sectors to K */
		sb_offset /= 2;
		break;
	case 1:
		sb_offset = 0;
		break;
	case 2:
		sb_offset = 4;
		break;
	default:
		return -EINVAL;
	}
	rdev->sb_offset = sb_offset;

	/* superblock is rarely larger than 1K, but it can be larger,
	 * and it is safe to read 4k, so we do that
	 */
	ret = read_disk_sb(rdev, 4096);
	if (ret) return ret;

	sb = (struct mdp_superblock_1*)page_address(rdev->sb_page);

	if (sb->magic != cpu_to_le32(MD_SB_MAGIC) ||
	    sb->major_version != cpu_to_le32(1) ||
	    le32_to_cpu(sb->max_dev) > (4096-256)/2 ||
	    le64_to_cpu(sb->super_offset) != (rdev->sb_offset<<1) ||
	    (le32_to_cpu(sb->feature_map) & ~MD_FEATURE_ALL) != 0)
		return -EINVAL;

	if (calc_sb_1_csum(sb) != sb->sb_csum) {
		printk("md: invalid superblock checksum on %s\n",
			bdevname(rdev->bdev,b));
		return -EINVAL;
	}
	if (le64_to_cpu(sb->data_size) < 10) {
		printk("md: data_size too small on %s\n",
		       bdevname(rdev->bdev,b));
		return -EINVAL;
	}
	if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BITMAP_OFFSET)) {
		if (sb->level != cpu_to_le32(1) &&
		    sb->level != cpu_to_le32(4) &&
		    sb->level != cpu_to_le32(5) &&
		    sb->level != cpu_to_le32(6) &&
		    sb->level != cpu_to_le32(10)) {
			printk(KERN_WARNING
			       "md: bitmaps not supported for this level.\n");
			return -EINVAL;
		}
	}

	rdev->preferred_minor = 0xffff;
	rdev->data_offset = le64_to_cpu(sb->data_offset);
	atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));

	rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
	bmask = queue_hardsect_size(rdev->bdev->bd_disk->queue)-1;
	if (rdev->sb_size & bmask)
		rdev->sb_size = (rdev->sb_size | bmask) + 1;

	if (minor_version
	    && rdev->data_offset < sb_offset + (rdev->sb_size/512))
		return -EINVAL;

	if (sb->level == cpu_to_le32(LEVEL_MULTIPATH))
		rdev->desc_nr = -1;
	else
		rdev->desc_nr = le32_to_cpu(sb->dev_number);

	if (refdev == 0)
		ret = 1;
	else {
		__u64 ev1, ev2;
		struct mdp_superblock_1 *refsb =
			(struct mdp_superblock_1*)page_address(refdev->sb_page);

		if (memcmp(sb->set_uuid, refsb->set_uuid, 16) != 0 ||
		    sb->level != refsb->level ||
		    sb->layout != refsb->layout ||
		    sb->chunksize != refsb->chunksize) {
			printk(KERN_WARNING "md: %s has strangely different"
				" superblock to %s\n",
				bdevname(rdev->bdev,b),
				bdevname(refdev->bdev,b2));
			return -EINVAL;
		}
		ev1 = le64_to_cpu(sb->events);
		ev2 = le64_to_cpu(refsb->events);

		if (ev1 > ev2)
			ret = 1;
		else
			ret = 0;
	}
	if (minor_version)
		rdev->size = ((rdev->bdev->bd_inode->i_size>>9) - le64_to_cpu(sb->data_offset)) / 2;
	else
		rdev->size = rdev->sb_offset;
	if (rdev->size < le64_to_cpu(sb->data_size)/2)
		return -EINVAL;
	rdev->size = le64_to_cpu(sb->data_size)/2;
	if (le32_to_cpu(sb->chunksize))
		rdev->size &= ~((sector_t)le32_to_cpu(sb->chunksize)/2 - 1);

	if (le64_to_cpu(sb->size) > rdev->size*2)
		return -EINVAL;
	return ret;
}
static int super_1_validate(mddev_t *mddev, mdk_rdev_t *rdev)
{
	struct mdp_superblock_1 *sb = (struct mdp_superblock_1*)page_address(rdev->sb_page);
	__u64 ev1 = le64_to_cpu(sb->events);

	rdev->raid_disk = -1;
	clear_bit(Faulty, &rdev->flags);
	clear_bit(In_sync, &rdev->flags);
	clear_bit(WriteMostly, &rdev->flags);
	clear_bit(BarriersNotsupp, &rdev->flags);

	if (mddev->raid_disks == 0) {
		mddev->major_version = 1;
		mddev->patch_version = 0;
		mddev->external = 0;
		mddev->chunk_size = le32_to_cpu(sb->chunksize) << 9;
		mddev->ctime = le64_to_cpu(sb->ctime) & ((1ULL << 32)-1);
		mddev->utime = le64_to_cpu(sb->utime) & ((1ULL << 32)-1);
		mddev->level = le32_to_cpu(sb->level);
		mddev->clevel[0] = 0;
		mddev->layout = le32_to_cpu(sb->layout);
		mddev->raid_disks = le32_to_cpu(sb->raid_disks);
		mddev->size = le64_to_cpu(sb->size)/2;
		mddev->events = ev1;
		mddev->bitmap_offset = 0;
		mddev->default_bitmap_offset = 1024 >> 9;

		mddev->recovery_cp = le64_to_cpu(sb->resync_offset);
		memcpy(mddev->uuid, sb->set_uuid, 16);

		mddev->max_disks =  (4096-256)/2;

		if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BITMAP_OFFSET) &&
		    mddev->bitmap_file == NULL )
			mddev->bitmap_offset = (__s32)le32_to_cpu(sb->bitmap_offset);

		if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE)) {
			mddev->reshape_position = le64_to_cpu(sb->reshape_position);
			mddev->delta_disks = le32_to_cpu(sb->delta_disks);
			mddev->new_level = le32_to_cpu(sb->new_level);
			mddev->new_layout = le32_to_cpu(sb->new_layout);
			mddev->new_chunk = le32_to_cpu(sb->new_chunk)<<9;
		} else {
			mddev->reshape_position = MaxSector;
			mddev->delta_disks = 0;
			mddev->new_level = mddev->level;
			mddev->new_layout = mddev->layout;
			mddev->new_chunk = mddev->chunk_size;
		}

	} else if (mddev->pers == NULL) {
		/* Insist on a good event counter while assembling */
		++ev1;
		if (ev1 < mddev->events)
			return -EINVAL;
	} else if (mddev->bitmap) {
		/* If adding to array with a bitmap, then we can accept an
		 * older device, but not too old.
		 */
		if (ev1 < mddev->bitmap->events_cleared)
			return 0;
	} else {
		if (ev1 < mddev->events)
			/* just a hot-add of a new device, leave raid_disk at -1 */
			return 0;
	}
	if (mddev->level != LEVEL_MULTIPATH) {
		int role;
		role = le16_to_cpu(sb->dev_roles[rdev->desc_nr]);
		switch(role) {
		case 0xffff: /* spare */
			break;
		case 0xfffe: /* faulty */
			set_bit(Faulty, &rdev->flags);
			break;
		default:
			if ((le32_to_cpu(sb->feature_map) &
			     MD_FEATURE_RECOVERY_OFFSET))
				rdev->recovery_offset = le64_to_cpu(sb->recovery_offset);
			else
				set_bit(In_sync, &rdev->flags);
			rdev->raid_disk = role;
			break;
		}
		if (sb->devflags & WriteMostly1)
			set_bit(WriteMostly, &rdev->flags);
	} else /* MULTIPATH are always insync */
		set_bit(In_sync, &rdev->flags);

	return 0;
}
static void super_1_sync(mddev_t *mddev, mdk_rdev_t *rdev)
{
	struct mdp_superblock_1 *sb;
	struct list_head *tmp;
	mdk_rdev_t *rdev2;
	int max_dev, i;
	/* make rdev->sb match mddev and rdev data. */

	sb = (struct mdp_superblock_1*)page_address(rdev->sb_page);

	sb->feature_map = 0;
	sb->pad0 = 0;
	sb->recovery_offset = cpu_to_le64(0);
	memset(sb->pad1, 0, sizeof(sb->pad1));
	memset(sb->pad2, 0, sizeof(sb->pad2));
	memset(sb->pad3, 0, sizeof(sb->pad3));

	sb->utime = cpu_to_le64((__u64)mddev->utime);
	sb->events = cpu_to_le64(mddev->events);
	if (mddev->in_sync)
		sb->resync_offset = cpu_to_le64(mddev->recovery_cp);
	else
		sb->resync_offset = cpu_to_le64(0);

	sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));

	sb->raid_disks = cpu_to_le32(mddev->raid_disks);
	sb->size = cpu_to_le64(mddev->size<<1);

	if (mddev->bitmap && mddev->bitmap_file == NULL) {
		sb->bitmap_offset = cpu_to_le32((__u32)mddev->bitmap_offset);
		sb->feature_map = cpu_to_le32(MD_FEATURE_BITMAP_OFFSET);
	}

	if (rdev->raid_disk >= 0 &&
	    !test_bit(In_sync, &rdev->flags) &&
	    rdev->recovery_offset > 0) {
		sb->feature_map |= cpu_to_le32(MD_FEATURE_RECOVERY_OFFSET);
		sb->recovery_offset = cpu_to_le64(rdev->recovery_offset);
	}

	if (mddev->reshape_position != MaxSector) {
		sb->feature_map |= cpu_to_le32(MD_FEATURE_RESHAPE_ACTIVE);
		sb->reshape_position = cpu_to_le64(mddev->reshape_position);
		sb->new_layout = cpu_to_le32(mddev->new_layout);
		sb->delta_disks = cpu_to_le32(mddev->delta_disks);
		sb->new_level = cpu_to_le32(mddev->new_level);
		sb->new_chunk = cpu_to_le32(mddev->new_chunk>>9);
	}

	max_dev = 0;
	rdev_for_each(rdev2, tmp, mddev)
		if (rdev2->desc_nr+1 > max_dev)
			max_dev = rdev2->desc_nr+1;

	if (max_dev > le32_to_cpu(sb->max_dev))
		sb->max_dev = cpu_to_le32(max_dev);
	for (i=0; i<max_dev;i++)
		sb->dev_roles[i] = cpu_to_le16(0xfffe);

	rdev_for_each(rdev2, tmp, mddev) {
		i = rdev2->desc_nr;
		if (test_bit(Faulty, &rdev2->flags))
			sb->dev_roles[i] = cpu_to_le16(0xfffe);
		else if (test_bit(In_sync, &rdev2->flags))
			sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk);
		else if (rdev2->raid_disk >= 0 && rdev2->recovery_offset > 0)
			sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk);
		else
			sb->dev_roles[i] = cpu_to_le16(0xffff);
	}

	sb->sb_csum = calc_sb_1_csum(sb);
}
static struct super_type super_types[] = {
	[0] = {
		.name		= "0.90.0",
		.owner		= THIS_MODULE,
		.load_super	= super_90_load,
		.validate_super	= super_90_validate,
		.sync_super	= super_90_sync,
	},
	[1] = {
		.name		= "md-1",
		.owner		= THIS_MODULE,
		.load_super	= super_1_load,
		.validate_super	= super_1_validate,
		.sync_super	= super_1_sync,
	},
};
static int match_mddev_units(mddev_t *mddev1, mddev_t *mddev2)
{
	struct list_head *tmp, *tmp2;
	mdk_rdev_t *rdev, *rdev2;

	rdev_for_each(rdev, tmp, mddev1)
		rdev_for_each(rdev2, tmp2, mddev2)
			if (rdev->bdev->bd_contains ==
			    rdev2->bdev->bd_contains)
				return 1;

	return 0;
}

static LIST_HEAD(pending_raid_disks);
static int bind_rdev_to_array(mdk_rdev_t * rdev, mddev_t * mddev)
{
	char b[BDEVNAME_SIZE];
	struct kobject *ko;
	char *s;
	int err;

	if (rdev->mddev) {
		MD_BUG();
		return -EINVAL;
	}

	/* make sure rdev->size exceeds mddev->size */
	if (rdev->size && (mddev->size == 0 || rdev->size < mddev->size)) {
		if (mddev->pers) {
			/* Cannot change size, so fail
			 * If mddev->level <= 0, then we don't care
			 * about aligning sizes (e.g. linear)
			 */
			if (mddev->level > 0)
				return -ENOSPC;
		} else
			mddev->size = rdev->size;
	}

	/* Verify rdev->desc_nr is unique.
	 * If it is -1, assign a free number, else
	 * check number is not in use
	 */
	if (rdev->desc_nr < 0) {
		int choice = 0;
		if (mddev->pers) choice = mddev->raid_disks;
		while (find_rdev_nr(mddev, choice))
			choice++;
		rdev->desc_nr = choice;
	} else {
		if (find_rdev_nr(mddev, rdev->desc_nr))
			return -EBUSY;
	}
	bdevname(rdev->bdev,b);
	while ( (s=strchr(b, '/')) != NULL)
		*s = '!';

	rdev->mddev = mddev;
	printk(KERN_INFO "md: bind<%s>\n", b);

	if ((err = kobject_add(&rdev->kobj, &mddev->kobj, "dev-%s", b)))
		goto fail;

	if (rdev->bdev->bd_part)
		ko = &rdev->bdev->bd_part->dev.kobj;
	else
		ko = &rdev->bdev->bd_disk->dev.kobj;
	if ((err = sysfs_create_link(&rdev->kobj, ko, "block"))) {
		kobject_del(&rdev->kobj);
		goto fail;
	}
	list_add(&rdev->same_set, &mddev->disks);
	bd_claim_by_disk(rdev->bdev, rdev->bdev->bd_holder, mddev->gendisk);
	return 0;

 fail:
	printk(KERN_WARNING "md: failed to register dev-%s for %s\n",
	       b, mdname(mddev));
	return err;
}
static void md_delayed_delete(struct work_struct *ws)
{
	mdk_rdev_t *rdev = container_of(ws, mdk_rdev_t, del_work);
	kobject_del(&rdev->kobj);
	kobject_put(&rdev->kobj);
}

static void unbind_rdev_from_array(mdk_rdev_t * rdev)
{
	char b[BDEVNAME_SIZE];
	if (!rdev->mddev) {
		MD_BUG();
		return;
	}
	bd_release_from_disk(rdev->bdev, rdev->mddev->gendisk);
	list_del_init(&rdev->same_set);
	printk(KERN_INFO "md: unbind<%s>\n", bdevname(rdev->bdev,b));
	rdev->mddev = NULL;
	sysfs_remove_link(&rdev->kobj, "block");

	/* We need to delay this, otherwise we can deadlock when
	 * writing 'remove' to "dev/state"
	 */
	INIT_WORK(&rdev->del_work, md_delayed_delete);
	kobject_get(&rdev->kobj);
	schedule_work(&rdev->del_work);
}
/*
 * prevent the device from being mounted, repartitioned or
 * otherwise reused by a RAID array (or any other kernel
 * subsystem), by bd_claiming the device.
 */
static int lock_rdev(mdk_rdev_t *rdev, dev_t dev, int shared)
{
	int err = 0;
	struct block_device *bdev;
	char b[BDEVNAME_SIZE];

	bdev = open_by_devnum(dev, FMODE_READ|FMODE_WRITE);
	if (IS_ERR(bdev)) {
		printk(KERN_ERR "md: could not open %s.\n",
			__bdevname(dev, b));
		return PTR_ERR(bdev);
	}
	err = bd_claim(bdev, shared ? (mdk_rdev_t *)lock_rdev : rdev);
	if (err) {
		printk(KERN_ERR "md: could not bd_claim %s.\n",
			bdevname(bdev, b));
		blkdev_put(bdev);
		return err;
	}
	if (!shared)
		set_bit(AllReserved, &rdev->flags);
	rdev->bdev = bdev;
	return err;
}

static void unlock_rdev(mdk_rdev_t *rdev)
{
	struct block_device *bdev = rdev->bdev;
	rdev->bdev = NULL;
	if (!bdev)
		MD_BUG();
	bd_release(bdev);
	blkdev_put(bdev);
}

void md_autodetect_dev(dev_t dev);

static void export_rdev(mdk_rdev_t * rdev)
{
	char b[BDEVNAME_SIZE];
	printk(KERN_INFO "md: export_rdev(%s)\n",
		bdevname(rdev->bdev,b));
	if (rdev->mddev)
		MD_BUG();
	free_disk_sb(rdev);
	list_del_init(&rdev->same_set);
#ifndef MODULE
	if (test_bit(AutoDetected, &rdev->flags))
		md_autodetect_dev(rdev->bdev->bd_dev);
#endif
	unlock_rdev(rdev);
	kobject_put(&rdev->kobj);
}
static void kick_rdev_from_array(mdk_rdev_t * rdev)
{
	unbind_rdev_from_array(rdev);
	export_rdev(rdev);
}

static void export_array(mddev_t *mddev)
{
	struct list_head *tmp;
	mdk_rdev_t *rdev;

	rdev_for_each(rdev, tmp, mddev) {
		if (!rdev->mddev) {
			MD_BUG();
			continue;
		}
		kick_rdev_from_array(rdev);
	}
	if (!list_empty(&mddev->disks))
		MD_BUG();
	mddev->raid_disks = 0;
	mddev->major_version = 0;
}
static void print_desc(mdp_disk_t *desc)
{
	printk(" DISK<N:%d,(%d,%d),R:%d,S:%d>\n", desc->number,
		desc->major,desc->minor,desc->raid_disk,desc->state);
}

static void print_sb(mdp_super_t *sb)
{
	int i;

	printk(KERN_INFO
		"md:  SB: (V:%d.%d.%d) ID:<%08x.%08x.%08x.%08x> CT:%08x\n",
		sb->major_version, sb->minor_version, sb->patch_version,
		sb->set_uuid0, sb->set_uuid1, sb->set_uuid2, sb->set_uuid3,
		sb->ctime);
	printk(KERN_INFO "md:     L%d S%08d ND:%d RD:%d md%d LO:%d CS:%d\n",
		sb->level, sb->size, sb->nr_disks, sb->raid_disks,
		sb->md_minor, sb->layout, sb->chunk_size);
	printk(KERN_INFO "md:     UT:%08x ST:%d AD:%d WD:%d"
		" FD:%d SD:%d CSUM:%08x E:%08lx\n",
		sb->utime, sb->state, sb->active_disks, sb->working_disks,
		sb->failed_disks, sb->spare_disks,
		sb->sb_csum, (unsigned long)sb->events_lo);

	for (i = 0; i < MD_SB_DISKS; i++) {
		mdp_disk_t *desc;

		desc = sb->disks + i;
		if (desc->number || desc->major || desc->minor ||
		    desc->raid_disk || (desc->state && (desc->state != 4))) {
			printk("     D %2d: ", i);
			print_desc(desc);
		}
	}
	printk(KERN_INFO "md:     THIS: ");
	print_desc(&sb->this_disk);
}

static void print_rdev(mdk_rdev_t *rdev)
{
	char b[BDEVNAME_SIZE];
	printk(KERN_INFO "md: rdev %s, SZ:%08llu F:%d S:%d DN:%u\n",
		bdevname(rdev->bdev,b), (unsigned long long)rdev->size,
		test_bit(Faulty, &rdev->flags), test_bit(In_sync, &rdev->flags),
		rdev->desc_nr);
	if (rdev->sb_loaded) {
		printk(KERN_INFO "md: rdev superblock:\n");
		print_sb((mdp_super_t*)page_address(rdev->sb_page));
	} else
		printk(KERN_INFO "md: no rdev superblock!\n");
}
static void md_print_devices(void)
{
	struct list_head *tmp, *tmp2;
	mdk_rdev_t *rdev;
	mddev_t *mddev;
	char b[BDEVNAME_SIZE];

	printk("\n");
	printk("md:	**********************************\n");
	printk("md:	* <COMPLETE RAID STATE PRINTOUT> *\n");
	printk("md:	**********************************\n");
	for_each_mddev(mddev, tmp) {

		if (mddev->bitmap)
			bitmap_print_sb(mddev->bitmap);
		else
			printk("%s: ", mdname(mddev));
		rdev_for_each(rdev, tmp2, mddev)
			printk("<%s>", bdevname(rdev->bdev,b));
		printk("\n");

		rdev_for_each(rdev, tmp2, mddev)
			print_rdev(rdev);
	}
	printk("md:	**********************************\n");
	printk("\n");
}
static void sync_sbs(mddev_t * mddev, int nospares)
{
	/* Update each superblock (in-memory image), but
	 * if we are allowed to, skip spares which already
	 * have the right event counter, or have one earlier
	 * (which would mean they aren't being marked as dirty
	 * with the rest of the array)
	 */
	mdk_rdev_t *rdev;
	struct list_head *tmp;

	rdev_for_each(rdev, tmp, mddev) {
		if (rdev->sb_events == mddev->events ||
		    (nospares &&
		     rdev->raid_disk < 0 &&
		     (rdev->sb_events&1)==0 &&
		     rdev->sb_events+1 == mddev->events)) {
			/* Don't update this superblock */
			rdev->sb_loaded = 2;
		} else {
			super_types[mddev->major_version].
				sync_super(mddev, rdev);
			rdev->sb_loaded = 1;
		}
	}
}
static void md_update_sb(mddev_t * mddev, int force_change)
{
	struct list_head *tmp;
	mdk_rdev_t *rdev;
	int sync_req;
	int nospares = 0;

	if (mddev->external)
		return;
repeat:
	spin_lock_irq(&mddev->write_lock);

	set_bit(MD_CHANGE_PENDING, &mddev->flags);
	if (test_and_clear_bit(MD_CHANGE_DEVS, &mddev->flags))
		force_change = 1;
	if (test_and_clear_bit(MD_CHANGE_CLEAN, &mddev->flags))
		/* just a clean<->dirty transition, possibly leave spares alone,
		 * though if events isn't the right even/odd, we will have to do
		 * it anyway.
		 */
		nospares = 1;
	if (force_change)
		nospares = 0;
	if (mddev->degraded)
		/* If the array is degraded, then skipping spares is both
		 * dangerous and fairly pointless.
		 * Dangerous because a device that was removed from the array
		 * might have an event_count that still looks up-to-date,
		 * so it can be re-added without a resync.
		 * Pointless because if there are any spares to skip,
		 * then a recovery will happen and soon that array won't
		 * be degraded any more and the spare can go back to sleep then.
		 */
		nospares = 0;

	sync_req = mddev->in_sync;
	mddev->utime = get_seconds();

	/* If this is just a dirty<->clean transition, and the array is clean
	 * and 'events' is odd, we can roll back to the previous clean state */
	if (nospares
	    && (mddev->in_sync && mddev->recovery_cp == MaxSector)
	    && (mddev->events & 1)
	    && mddev->events != 1)
		mddev->events--;
	else {
		/* otherwise we have to go forward and ... */
		mddev->events ++;
		if (!mddev->in_sync || mddev->recovery_cp != MaxSector) { /* not clean */
			/* .. if the array isn't clean, insist on an odd 'events' */
			if ((mddev->events&1)==0) {
				mddev->events++;
				nospares = 0;
			}
		} else {
			/* otherwise insist on an even 'events' (for clean states) */
			if ((mddev->events&1)) {
				mddev->events++;
				nospares = 0;
			}
		}
	}

	if (!mddev->events) {
		/*
		 * oops, this 64-bit counter should never wrap.
		 * Either we are in around ~1 trillion A.C., assuming
		 * 1 reboot per second, or we have a bug:
		 */
		MD_BUG();
		mddev->events --;
	}

	/*
	 * do not write anything to disk if using
	 * nonpersistent superblocks
	 */
	if (!mddev->persistent) {
		if (!mddev->external)
			clear_bit(MD_CHANGE_PENDING, &mddev->flags);

		spin_unlock_irq(&mddev->write_lock);
		wake_up(&mddev->sb_wait);
		return;
	}
	sync_sbs(mddev, nospares);
	spin_unlock_irq(&mddev->write_lock);

	dprintk(KERN_INFO
		"md: updating %s RAID superblock on device (in sync %d)\n",
		mdname(mddev),mddev->in_sync);

	bitmap_update_sb(mddev->bitmap);
	rdev_for_each(rdev, tmp, mddev) {
		char b[BDEVNAME_SIZE];
		dprintk(KERN_INFO "md: ");
		if (rdev->sb_loaded != 1)
			continue; /* no noise on spare devices */
		if (test_bit(Faulty, &rdev->flags))
			dprintk("(skipping faulty ");

		dprintk("%s ", bdevname(rdev->bdev,b));
		if (!test_bit(Faulty, &rdev->flags)) {
			md_super_write(mddev,rdev,
				       rdev->sb_offset<<1, rdev->sb_size,
				       rdev->sb_page);
			dprintk(KERN_INFO "(write) %s's sb offset: %llu\n",
				bdevname(rdev->bdev,b),
				(unsigned long long)rdev->sb_offset);
			rdev->sb_events = mddev->events;

		} else
			dprintk(")\n");
		if (mddev->level == LEVEL_MULTIPATH)
			/* only need to write one superblock... */
			break;
	}
	md_super_wait(mddev);
	/* if there was a failure, MD_CHANGE_DEVS was set, and we re-write super */

	spin_lock_irq(&mddev->write_lock);
	if (mddev->in_sync != sync_req ||
	    test_bit(MD_CHANGE_DEVS, &mddev->flags)) {
		/* have to write it out again */
		spin_unlock_irq(&mddev->write_lock);
		goto repeat;
	}
	clear_bit(MD_CHANGE_PENDING, &mddev->flags);
	spin_unlock_irq(&mddev->write_lock);
	wake_up(&mddev->sb_wait);
}
/* words written to sysfs files may, or may not, be \n terminated.
 * We want to accept either form. For this we use cmd_match.
 */
static int cmd_match(const char *cmd, const char *str)
{
	/* See if cmd, written into a sysfs file, matches
	 * str.  They must either be the same, or cmd can
	 * have a trailing newline
	 */
	while (*cmd && *str && *cmd == *str) {
		cmd++;
		str++;
	}
	if (*cmd == '\n')
		cmd++;
	if (*str || *cmd)
		return 0;
	return 1;
}
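
/*
 * For example, cmd_match("faulty\n", "faulty") and
 * cmd_match("faulty", "faulty") both return 1, while
 * cmd_match("faulty2", "faulty") returns 0.
 */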
struct rdev_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(mdk_rdev_t *, char *);
	ssize_t (*store)(mdk_rdev_t *, const char *, size_t);
};

static ssize_t
state_show(mdk_rdev_t *rdev, char *page)
{
	char *sep = "";
	size_t len = 0;

	if (test_bit(Faulty, &rdev->flags)) {
		len+= sprintf(page+len, "%sfaulty",sep);
		sep = ",";
	}
	if (test_bit(In_sync, &rdev->flags)) {
		len += sprintf(page+len, "%sin_sync",sep);
		sep = ",";
	}
	if (test_bit(WriteMostly, &rdev->flags)) {
		len += sprintf(page+len, "%swrite_mostly",sep);
		sep = ",";
	}
	if (!test_bit(Faulty, &rdev->flags) &&
	    !test_bit(In_sync, &rdev->flags)) {
		len += sprintf(page+len, "%sspare", sep);
		sep = ",";
	}
	return len+sprintf(page+len, "\n");
}
static ssize_t
state_store(mdk_rdev_t *rdev, const char *buf, size_t len)
{
	/* can write
	 *  faulty  - simulates an error
	 *  remove  - disconnects the device
	 *  writemostly - sets write_mostly
	 *  -writemostly - clears write_mostly
	 */
	int err = -EINVAL;
	if (cmd_match(buf, "faulty") && rdev->mddev->pers) {
		md_error(rdev->mddev, rdev);
		err = 0;
	} else if (cmd_match(buf, "remove")) {
		if (rdev->raid_disk >= 0)
			err = -EBUSY;
		else {
			mddev_t *mddev = rdev->mddev;
			kick_rdev_from_array(rdev);
			if (mddev->pers)
				md_update_sb(mddev, 1);
			md_new_event(mddev);
			err = 0;
		}
	} else if (cmd_match(buf, "writemostly")) {
		set_bit(WriteMostly, &rdev->flags);
		err = 0;
	} else if (cmd_match(buf, "-writemostly")) {
		clear_bit(WriteMostly, &rdev->flags);
		err = 0;
	}
	return err ? err : len;
}
static struct rdev_sysfs_entry rdev_state =
__ATTR(state, S_IRUGO|S_IWUSR, state_show, state_store);
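
/*
 * Example use from userspace (sda1 is just a hypothetical member
 * device of array md0; member directories are named "dev-%s" above):
 *
 *	echo faulty > /sys/block/md0/md/dev-sda1/state
 *	echo remove > /sys/block/md0/md/dev-sda1/state
 */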
static ssize_t
errors_show(mdk_rdev_t *rdev, char *page)
{
	return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
}

static ssize_t
errors_store(mdk_rdev_t *rdev, const char *buf, size_t len)
{
	char *e;
	unsigned long n = simple_strtoul(buf, &e, 10);
	if (*buf && (*e == 0 || *e == '\n')) {
		atomic_set(&rdev->corrected_errors, n);
		return len;
	}
	return -EINVAL;
}
static struct rdev_sysfs_entry rdev_errors =
__ATTR(errors, S_IRUGO|S_IWUSR, errors_show, errors_store);
static ssize_t
slot_show(mdk_rdev_t *rdev, char *page)
{
	if (rdev->raid_disk < 0)
		return sprintf(page, "none\n");
	else
		return sprintf(page, "%d\n", rdev->raid_disk);
}

static ssize_t
slot_store(mdk_rdev_t *rdev, const char *buf, size_t len)
{
	char *e;
	int err;
	char nm[20];
	int slot = simple_strtoul(buf, &e, 10);
	if (strncmp(buf, "none", 4)==0)
		slot = -1;
	else if (e==buf || (*e && *e!= '\n'))
		return -EINVAL;
	if (rdev->mddev->pers) {
		/* Setting 'slot' on an active array requires also
		 * updating the 'rd%d' link, and communicating
		 * with the personality with ->hot_*_disk.
		 * For now we only support removing
		 * failed/spare devices.  This normally happens automatically,
		 * but not when the metadata is externally managed.
		 */
		if (slot != -1)
			return -EBUSY;
		if (rdev->raid_disk == -1)
			return -EEXIST;
		/* personality does all needed checks */
		if (rdev->mddev->pers->hot_add_disk == NULL)
			return -EINVAL;
		err = rdev->mddev->pers->
			hot_remove_disk(rdev->mddev, rdev->raid_disk);
		if (err)
			return err;
		sprintf(nm, "rd%d", rdev->raid_disk);
		sysfs_remove_link(&rdev->mddev->kobj, nm);
		set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
		md_wakeup_thread(rdev->mddev->thread);
	} else {
		if (slot >= rdev->mddev->raid_disks)
			return -ENOSPC;
		rdev->raid_disk = slot;
		/* assume it is working */
		clear_bit(Faulty, &rdev->flags);
		clear_bit(WriteMostly, &rdev->flags);
		set_bit(In_sync, &rdev->flags);
	}
	return len;
}

static struct rdev_sysfs_entry rdev_slot =
__ATTR(slot, S_IRUGO|S_IWUSR, slot_show, slot_store);
static ssize_t
offset_show(mdk_rdev_t *rdev, char *page)
{
	return sprintf(page, "%llu\n", (unsigned long long)rdev->data_offset);
}

static ssize_t
offset_store(mdk_rdev_t *rdev, const char *buf, size_t len)
{
	char *e;
	unsigned long long offset = simple_strtoull(buf, &e, 10);
	if (e==buf || (*e && *e != '\n'))
		return -EINVAL;
	if (rdev->mddev->pers)
		return -EBUSY;
	if (rdev->size && rdev->mddev->external)
		/* Must set offset before size, so overlap checks
		 * can be sane */
		return -EBUSY;
	rdev->data_offset = offset;
	return len;
}

static struct rdev_sysfs_entry rdev_offset =
__ATTR(offset, S_IRUGO|S_IWUSR, offset_show, offset_store);
static ssize_t
rdev_size_show(mdk_rdev_t *rdev, char *page)
{
	return sprintf(page, "%llu\n", (unsigned long long)rdev->size);
}

static int overlaps(sector_t s1, sector_t l1, sector_t s2, sector_t l2)
{
	/* check if two start/length pairs overlap */
	if (s1+l1 <= s2)
		return 0;
	if (s2+l2 <= s1)
		return 0;
	return 1;
}
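
/*
 * E.g. overlaps(0, 100, 50, 10) == 1 (the second range sits inside the
 * first), while overlaps(0, 100, 100, 10) == 0 (the ranges only touch).
 */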
static ssize_t
rdev_size_store(mdk_rdev_t *rdev, const char *buf, size_t len)
{
	char *e;
	unsigned long long size = simple_strtoull(buf, &e, 10);
	unsigned long long oldsize = rdev->size;
	mddev_t *my_mddev = rdev->mddev;

	if (e==buf || (*e && *e != '\n'))
		return -EINVAL;
	if (my_mddev->pers)
		return -EBUSY;
	rdev->size = size;
	if (size > oldsize && rdev->mddev->external) {
		/* need to check that all other rdevs with the same ->bdev
		 * do not overlap.  We need to unlock the mddev to avoid
		 * a deadlock.  We have already changed rdev->size, and if
		 * we have to change it back, we will have the lock again.
		 */
		mddev_t *mddev;
		int overlap = 0;
		struct list_head *tmp, *tmp2;

		mddev_unlock(my_mddev);
		for_each_mddev(mddev, tmp) {
			mdk_rdev_t *rdev2;

			mddev_lock(mddev);
			rdev_for_each(rdev2, tmp2, mddev)
				if (test_bit(AllReserved, &rdev2->flags) ||
				    (rdev->bdev == rdev2->bdev &&
				     rdev != rdev2 &&
				     overlaps(rdev->data_offset, rdev->size,
					      rdev2->data_offset, rdev2->size))) {
					overlap = 1;
					break;
				}
			mddev_unlock(mddev);
			if (overlap) {
				mddev_put(mddev);
				break;
			}
		}
		mddev_lock(my_mddev);
		if (overlap) {
			/* Someone else could have slipped in a size
			 * change here, but doing so is just silly.
			 * We put oldsize back because we *know* it is
			 * safe, and trust userspace not to race with
			 * itself
			 */
			rdev->size = oldsize;
			return -EBUSY;
		}
	}
	if (size < my_mddev->size || my_mddev->size == 0)
		my_mddev->size = size;
	return len;
}

static struct rdev_sysfs_entry rdev_size =
__ATTR(size, S_IRUGO|S_IWUSR, rdev_size_show, rdev_size_store);
static struct attribute *rdev_default_attrs[] = {
	&rdev_state.attr,
	&rdev_errors.attr,
	&rdev_slot.attr,
	&rdev_offset.attr,
	&rdev_size.attr,
	NULL,
};
static ssize_t
rdev_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
	struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr);
	mdk_rdev_t *rdev = container_of(kobj, mdk_rdev_t, kobj);
	mddev_t *mddev = rdev->mddev;
	ssize_t rv;

	if (!entry->show)
		return -EIO;

	rv = mddev ? mddev_lock(mddev) : -EBUSY;
	if (!rv) {
		if (rdev->mddev == NULL)
			rv = -EBUSY;
		else
			rv = entry->show(rdev, page);
		mddev_unlock(mddev);
	}
	return rv;
}

static ssize_t
rdev_attr_store(struct kobject *kobj, struct attribute *attr,
	      const char *page, size_t length)
{
	struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr);
	mdk_rdev_t *rdev = container_of(kobj, mdk_rdev_t, kobj);
	ssize_t rv;
	mddev_t *mddev = rdev->mddev;

	if (!entry->store)
		return -EIO;
	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;
	rv = mddev ? mddev_lock(mddev): -EBUSY;
	if (!rv) {
		if (rdev->mddev == NULL)
			rv = -EBUSY;
		else
			rv = entry->store(rdev, page, length);
		mddev_unlock(mddev);
	}
	return rv;
}

static void rdev_free(struct kobject *ko)
{
	mdk_rdev_t *rdev = container_of(ko, mdk_rdev_t, kobj);
	kfree(rdev);
}
static struct sysfs_ops rdev_sysfs_ops = {
	.show		= rdev_attr_show,
	.store		= rdev_attr_store,
};
static struct kobj_type rdev_ktype = {
	.release	= rdev_free,
	.sysfs_ops	= &rdev_sysfs_ops,
	.default_attrs	= rdev_default_attrs,
};
/*
 * Import a device. If 'super_format' >= 0, then sanity check the superblock
 *
 * mark the device faulty if:
 *
 *   - the device is nonexistent (zero size)
 *   - the device has no valid superblock
 *
 * a faulty rdev _never_ has rdev->sb set.
 */
static mdk_rdev_t *md_import_device(dev_t newdev, int super_format, int super_minor)
{
	char b[BDEVNAME_SIZE];
	int err;
	mdk_rdev_t *rdev;
	sector_t size;

	rdev = kzalloc(sizeof(*rdev), GFP_KERNEL);
	if (!rdev) {
		printk(KERN_ERR "md: could not alloc mem for new device!\n");
		return ERR_PTR(-ENOMEM);
	}

	if ((err = alloc_disk_sb(rdev)))
		goto abort_free;

	err = lock_rdev(rdev, newdev, super_format == -2);
	if (err)
		goto abort_free;

	kobject_init(&rdev->kobj, &rdev_ktype);

	rdev->desc_nr = -1;
	rdev->saved_raid_disk = -1;
	rdev->raid_disk = -1;
	rdev->flags = 0;
	rdev->data_offset = 0;
	rdev->sb_events = 0;
	atomic_set(&rdev->nr_pending, 0);
	atomic_set(&rdev->read_errors, 0);
	atomic_set(&rdev->corrected_errors, 0);

	size = rdev->bdev->bd_inode->i_size >> BLOCK_SIZE_BITS;
	if (!size) {
		printk(KERN_WARNING
			"md: %s has zero or unknown size, marking faulty!\n",
			bdevname(rdev->bdev,b));
		err = -EINVAL;
		goto abort_free;
	}

	if (super_format >= 0) {
		err = super_types[super_format].
			load_super(rdev, NULL, super_minor);
		if (err == -EINVAL) {
			printk(KERN_WARNING
				"md: %s does not have a valid v%d.%d "
				"superblock, not importing!\n",
				bdevname(rdev->bdev,b),
				super_format, super_minor);
			goto abort_free;
		}
		if (err < 0) {
			printk(KERN_WARNING
				"md: could not read %s's sb, not importing!\n",
				bdevname(rdev->bdev,b));
			goto abort_free;
		}
	}
	INIT_LIST_HEAD(&rdev->same_set);

	return rdev;

abort_free:
	if (rdev->sb_page) {
		if (rdev->bdev)
			unlock_rdev(rdev);
		free_disk_sb(rdev);
	}
	kfree(rdev);
	return ERR_PTR(err);
}
/*
 * Check a full RAID array for plausibility
 */

static void analyze_sbs(mddev_t * mddev)
{
	int i;
	struct list_head *tmp;
	mdk_rdev_t *rdev, *freshest;
	char b[BDEVNAME_SIZE];

	freshest = NULL;
	rdev_for_each(rdev, tmp, mddev)
		switch (super_types[mddev->major_version].
			load_super(rdev, freshest, mddev->minor_version)) {
		case 1:
			freshest = rdev;
			break;
		case 0:
			break;
		default:
			printk( KERN_ERR \
				"md: fatal superblock inconsistency in %s"
				" -- removing from array\n",
				bdevname(rdev->bdev,b));
			kick_rdev_from_array(rdev);
		}

	super_types[mddev->major_version].
		validate_super(mddev, freshest);

	i = 0;
	rdev_for_each(rdev, tmp, mddev) {
		if (rdev != freshest)
			if (super_types[mddev->major_version].
			    validate_super(mddev, rdev)) {
				printk(KERN_WARNING "md: kicking non-fresh %s"
					" from array!\n",
					bdevname(rdev->bdev,b));
				kick_rdev_from_array(rdev);
				continue;
			}
		if (mddev->level == LEVEL_MULTIPATH) {
			rdev->desc_nr = i++;
			rdev->raid_disk = rdev->desc_nr;
			set_bit(In_sync, &rdev->flags);
		} else if (rdev->raid_disk >= mddev->raid_disks) {
			rdev->raid_disk = -1;
			clear_bit(In_sync, &rdev->flags);
		}
	}

	if (mddev->recovery_cp != MaxSector &&
	    mddev->level >= 1)
		printk(KERN_ERR "md: %s: raid array is not clean"
		       " -- starting background reconstruction\n",
		       mdname(mddev));
}
static ssize_t
safe_delay_show(mddev_t *mddev, char *page)
{
	int msec = (mddev->safemode_delay*1000)/HZ;
	return sprintf(page, "%d.%03d\n", msec/1000, msec%1000);
}
static ssize_t
safe_delay_store(mddev_t *mddev, const char *cbuf, size_t len)
{
	int scale=1;
	int dot=0;
	int i;
	unsigned long msec;
	char buf[30];
	char *e;
	/* remove a period, and count digits after it */
	if (len >= sizeof(buf))
		return -EINVAL;
	strlcpy(buf, cbuf, len);
	buf[len] = 0;
	for (i=0; i<len; i++) {
		if (dot) {
			if (isdigit(buf[i])) {
				buf[i-1] = buf[i];
				scale *= 10;
			}
			buf[i] = 0;
		} else if (buf[i] == '.') {
			dot=1;
			buf[i] = 0;
		}
	}
	msec = simple_strtoul(buf, &e, 10);
	if (e == buf || (*e && *e != '\n'))
		return -EINVAL;
	msec = (msec * 1000) / scale;
	if (msec == 0)
		mddev->safemode_delay = 0;
	else {
		mddev->safemode_delay = (msec*HZ)/1000;
		if (mddev->safemode_delay == 0)
			mddev->safemode_delay = 1;
	}
	return len;
}
static struct md_sysfs_entry md_safe_delay =
__ATTR(safe_mode_delay, S_IRUGO|S_IWUSR,safe_delay_show, safe_delay_store);
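
/*
 * The value is written and shown as "seconds.milliseconds"; for
 * example (md0 is just an example array):
 *
 *	echo 0.200 > /sys/block/md0/md/safe_mode_delay
 *
 * sets a 200 msec delay, and a plain 0 turns safe mode off.
 */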
static ssize_t
level_show(mddev_t *mddev, char *page)
{
	struct mdk_personality *p = mddev->pers;
	if (p)
		return sprintf(page, "%s\n", p->name);
	else if (mddev->clevel[0])
		return sprintf(page, "%s\n", mddev->clevel);
	else if (mddev->level != LEVEL_NONE)
		return sprintf(page, "%d\n", mddev->level);
	else
		return 0;
}

static ssize_t
level_store(mddev_t *mddev, const char *buf, size_t len)
{
	ssize_t rv = len;
	if (mddev->pers)
		return -EBUSY;
	if (len == 0)
		return 0;
	if (len >= sizeof(mddev->clevel))
		return -ENOSPC;
	strncpy(mddev->clevel, buf, len);
	if (mddev->clevel[len-1] == '\n')
		len--;
	mddev->clevel[len] = 0;
	mddev->level = LEVEL_NONE;
	return rv;
}

static struct md_sysfs_entry md_level =
__ATTR(level, S_IRUGO|S_IWUSR, level_show, level_store);
static ssize_t
layout_show(mddev_t *mddev, char *page)
{
	/* just a number, not meaningful for all levels */
	if (mddev->reshape_position != MaxSector &&
	    mddev->layout != mddev->new_layout)
		return sprintf(page, "%d (%d)\n",
			       mddev->new_layout, mddev->layout);
	return sprintf(page, "%d\n", mddev->layout);
}

static ssize_t
layout_store(mddev_t *mddev, const char *buf, size_t len)
{
	char *e;
	unsigned long n = simple_strtoul(buf, &e, 10);

	if (!*buf || (*e && *e != '\n'))
		return -EINVAL;

	if (mddev->pers)
		return -EBUSY;
	if (mddev->reshape_position != MaxSector)
		mddev->new_layout = n;
	else
		mddev->layout = n;
	return len;
}
static struct md_sysfs_entry md_layout =
__ATTR(layout, S_IRUGO|S_IWUSR, layout_show, layout_store);
static ssize_t
raid_disks_show(mddev_t *mddev, char *page)
{
	if (mddev->raid_disks == 0)
		return 0;
	if (mddev->reshape_position != MaxSector &&
	    mddev->delta_disks != 0)
		return sprintf(page, "%d (%d)\n", mddev->raid_disks,
			       mddev->raid_disks - mddev->delta_disks);
	return sprintf(page, "%d\n", mddev->raid_disks);
}

static int update_raid_disks(mddev_t *mddev, int raid_disks);

static ssize_t
raid_disks_store(mddev_t *mddev, const char *buf, size_t len)
{
	char *e;
	int rv = 0;
	unsigned long n = simple_strtoul(buf, &e, 10);

	if (!*buf || (*e && *e != '\n'))
		return -EINVAL;

	if (mddev->pers)
		rv = update_raid_disks(mddev, n);
	else if (mddev->reshape_position != MaxSector) {
		int olddisks = mddev->raid_disks - mddev->delta_disks;
		mddev->delta_disks = n - olddisks;
		mddev->raid_disks = n;
	} else
		mddev->raid_disks = n;
	return rv ? rv : len;
}
static struct md_sysfs_entry md_raid_disks =
__ATTR(raid_disks, S_IRUGO|S_IWUSR, raid_disks_show, raid_disks_store);
static ssize_t
chunk_size_show(mddev_t *mddev, char *page)
{
	if (mddev->reshape_position != MaxSector &&
	    mddev->chunk_size != mddev->new_chunk)
		return sprintf(page, "%d (%d)\n", mddev->new_chunk,
			       mddev->chunk_size);
	return sprintf(page, "%d\n", mddev->chunk_size);
}

static ssize_t
chunk_size_store(mddev_t *mddev, const char *buf, size_t len)
{
	/* can only set chunk_size if array is not yet active */
	char *e;
	unsigned long n = simple_strtoul(buf, &e, 10);

	if (!*buf || (*e && *e != '\n'))
		return -EINVAL;

	if (mddev->pers)
		return -EBUSY;
	else if (mddev->reshape_position != MaxSector)
		mddev->new_chunk = n;
	else
		mddev->chunk_size = n;
	return len;
}
static struct md_sysfs_entry md_chunk_size =
__ATTR(chunk_size, S_IRUGO|S_IWUSR, chunk_size_show, chunk_size_store);
static ssize_t
resync_start_show(mddev_t *mddev, char *page)
{
	return sprintf(page, "%llu\n", (unsigned long long)mddev->recovery_cp);
}

static ssize_t
resync_start_store(mddev_t *mddev, const char *buf, size_t len)
{
	/* can only set resync_start if array is not yet active */
	char *e;
	unsigned long long n = simple_strtoull(buf, &e, 10);

	if (mddev->pers)
		return -EBUSY;
	if (!*buf || (*e && *e != '\n'))
		return -EINVAL;

	mddev->recovery_cp = n;
	return len;
}
static struct md_sysfs_entry md_resync_start =
__ATTR(resync_start, S_IRUGO|S_IWUSR, resync_start_show, resync_start_store);
/*
 * The array state can be:
 *
 * clear
 *     No devices, no size, no level
 *     Equivalent to STOP_ARRAY ioctl
 * inactive
 *     May have some settings, but array is not active
 *        all IO results in error
 *     When written, doesn't tear down array, but just stops it
 * suspended (not supported yet)
 *     All IO requests will block. The array can be reconfigured.
 *     Writing this, if accepted, will block until array is quiescent
 * readonly
 *     no resync can happen.  no superblocks get written.
 *     write requests fail
 * read-auto
 *     like readonly, but behaves like 'clean' on a write request.
 *
 * clean - no pending writes, but otherwise active.
 *     When written to inactive array, starts without resync
 *     If a write request arrives then
 *       if metadata is known, mark 'dirty' and switch to 'active'.
 *       if not known, block and switch to write-pending
 *     If written to an active array that has pending writes, then fails.
 * active
 *     fully active: IO and resync can be happening.
 *     When written to inactive array, starts with resync
 *
 * write-pending
 *     clean, but writes are blocked waiting for 'active' to be written.
 *
 * active-idle
 *     like active, but no writes have been seen for a while (100msec).
 *
 */
enum array_state { clear, inactive, suspended, readonly, read_auto, clean, active,
		   write_pending, active_idle, bad_word};
static char *array_states[] = {
	"clear", "inactive", "suspended", "readonly", "read-auto", "clean", "active",
	"write-pending", "active-idle", NULL };
2519 static int match_word(const char *word, char **list)
2522 for (n=0; list[n]; n++)
2523 if (cmd_match(word, list[n]))
2529 array_state_show(mddev_t *mddev, char *page)
2531 enum array_state st = inactive;
2544 else if (test_bit(MD_CHANGE_CLEAN, &mddev->flags))
2546 else if (mddev->safemode)
2552 if (list_empty(&mddev->disks) &&
2553 mddev->raid_disks == 0 &&
2559 return sprintf(page, "%s\n", array_states[st]);
2562 static int do_md_stop(mddev_t * mddev, int ro);
2563 static int do_md_run(mddev_t * mddev);
2564 static int restart_array(mddev_t *mddev);
2567 array_state_store(mddev_t *mddev, const char *buf, size_t len)
2570 enum array_state st = match_word(buf, array_states);
2575 /* stopping an active array */
2576 if (atomic_read(&mddev->active) > 1)
2578 err = do_md_stop(mddev, 0);
2581 /* stopping an active array */
2583 if (atomic_read(&mddev->active) > 1)
2585 err = do_md_stop(mddev, 2);
2587 err = 0; /* already inactive */
2590 break; /* not supported yet */
2593 err = do_md_stop(mddev, 1);
2596 err = do_md_run(mddev);
2600 /* stopping an active array */
2602 err = do_md_stop(mddev, 1);
2604 mddev->ro = 2; /* FIXME mark devices writable */
2607 err = do_md_run(mddev);
2612 restart_array(mddev);
2613 spin_lock_irq(&mddev->write_lock);
2614 if (atomic_read(&mddev->writes_pending) == 0) {
2615 if (mddev->in_sync == 0) {
2617 if (mddev->persistent)
2618 set_bit(MD_CHANGE_CLEAN,
2624 spin_unlock_irq(&mddev->write_lock);
2627 mddev->recovery_cp = MaxSector;
2628 err = do_md_run(mddev);
2633 restart_array(mddev);
2634 if (mddev->external)
2635 clear_bit(MD_CHANGE_CLEAN, &mddev->flags);
2636 wake_up(&mddev->sb_wait);
2640 err = do_md_run(mddev);
2645 /* these cannot be set */
2653 static struct md_sysfs_entry md_array_state =
2654 __ATTR(array_state, S_IRUGO|S_IWUSR, array_state_show, array_state_store);
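/*
 * Illustrative sketch, not part of md.c: the state machine documented
 * above is driven entirely through the array_state sysfs file.  A
 * minimal userspace helper could look like this (the /sys/block/md0
 * path and the helper names are assumptions for the example):
 */
#if 0	/* example only, never compiled into the driver */
#include <stdio.h>
#include <string.h>

/* Read the current state, e.g. "clean" or "active". */
static int md_get_state(char *state, int len)
{
	FILE *f = fopen("/sys/block/md0/md/array_state", "r");

	if (!f || !fgets(state, len, f)) {
		if (f)
			fclose(f);
		return -1;
	}
	state[strcspn(state, "\n")] = '\0';
	return fclose(f);
}

/* Request a transition, e.g. md_set_state("readonly"). */
static int md_set_state(const char *state)
{
	FILE *f = fopen("/sys/block/md0/md/array_state", "w");

	if (!f)
		return -1;
	fprintf(f, "%s\n", state);
	return fclose(f);	/* -EINVAL/-EBUSY surface on close */
}
#endif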
2657 null_show(mddev_t *mddev, char *page)
2663 new_dev_store(mddev_t *mddev, const char *buf, size_t len)
2665 /* buf must be "%d:%d\n" (newline optional), giving major and minor numbers */
2666 /* The new device is added to the array.
2667 * If the array has a persistent superblock, we read the
2668 * superblock to initialise info and check validity.
2669 * Otherwise, the only checking done is that in bind_rdev_to_array,
2670 * which mainly checks size.
2673 int major = simple_strtoul(buf, &e, 10);
2679 if (!*buf || *e != ':' || !e[1] || e[1] == '\n')
2681 minor = simple_strtoul(e+1, &e, 10);
2682 if (*e && *e != '\n')
2684 dev = MKDEV(major, minor);
2685 if (major != MAJOR(dev) ||
2686 minor != MINOR(dev))
2690 if (mddev->persistent) {
2691 rdev = md_import_device(dev, mddev->major_version,
2692 mddev->minor_version);
2693 if (!IS_ERR(rdev) && !list_empty(&mddev->disks)) {
2694 mdk_rdev_t *rdev0 = list_entry(mddev->disks.next,
2695 mdk_rdev_t, same_set);
2696 err = super_types[mddev->major_version]
2697 .load_super(rdev, rdev0, mddev->minor_version);
2701 } else if (mddev->external)
2702 rdev = md_import_device(dev, -2, -1);
2704 rdev = md_import_device(dev, -1, -1);
2707 return PTR_ERR(rdev);
2708 err = bind_rdev_to_array(rdev, mddev);
2712 return err ? err : len;
2715 static struct md_sysfs_entry md_new_device =
2716 __ATTR(new_dev, S_IWUSR, null_show, new_dev_store);
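/*
 * Illustrative sketch, not part of md.c: new_dev expects exactly the
 * "major:minor" text parsed above.  Adding the device numbered 8:16
 * from userspace could look like this (path and helper name are
 * assumptions for the example; call as md_new_dev(makedev(8, 16))):
 */
#if 0
#include <stdio.h>
#include <sys/types.h>
#include <sys/sysmacros.h>	/* major()/minor()/makedev() */

static int md_new_dev(dev_t dev)
{
	FILE *f = fopen("/sys/block/md0/md/new_dev", "w");

	if (!f)
		return -1;
	fprintf(f, "%u:%u\n", major(dev), minor(dev));
	return fclose(f);
}
#endif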
2719 bitmap_store(mddev_t *mddev, const char *buf, size_t len)
2722 unsigned long chunk, end_chunk;
2726 /* buf should be <chunk> <chunk> ... or <chunk>-<chunk> ... (range) */
2728 chunk = end_chunk = simple_strtoul(buf, &end, 0);
2729 if (buf == end) break;
2730 if (*end == '-') { /* range */
2732 end_chunk = simple_strtoul(buf, &end, 0);
2733 if (buf == end) break;
2735 if (*end && !isspace(*end)) break;
2736 bitmap_dirty_bits(mddev->bitmap, chunk, end_chunk);
2738 while (isspace(*buf)) buf++;
2740 bitmap_unplug(mddev->bitmap); /* flush the bits to disk */
2745 static struct md_sysfs_entry md_bitmap =
2746 __ATTR(bitmap_set_bits, S_IWUSR, null_show, bitmap_store);
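/*
 * Usage note (example, not part of md.c): bitmap_set_bits takes a
 * whitespace-separated list of chunk numbers, each either one chunk or
 * an inclusive "first-last" range, exactly as parsed above, e.g.
 *
 *	echo "0 5 100-200" > /sys/block/md0/md/bitmap_set_bits
 *
 * marks chunks 0, 5 and 100..200 dirty so they get resynced.
 */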
2749 size_show(mddev_t *mddev, char *page)
2751 return sprintf(page, "%llu\n", (unsigned long long)mddev->size);
2754 static int update_size(mddev_t *mddev, unsigned long size);
2757 size_store(mddev_t *mddev, const char *buf, size_t len)
2759 /* If array is inactive, we can reduce the component size, but
2760 * not increase it (except from 0).
2761 * If array is active, we can try an on-line resize
2765 unsigned long long size = simple_strtoull(buf, &e, 10);
2766 if (!*buf || *buf == '\n' ||
2771 err = update_size(mddev, size);
2772 md_update_sb(mddev, 1);
2774 if (mddev->size == 0 ||
2780 return err ? err : len;
2783 static struct md_sysfs_entry md_size =
2784 __ATTR(component_size, S_IRUGO|S_IWUSR, size_show, size_store);
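/*
 * Illustrative sketch, not part of md.c: component_size is the per-device
 * size in KiB.  As update_size() documents further down, writing 0 asks
 * for the largest size that fits, e.g. after swapping every member for a
 * bigger drive (path and helper name are assumptions):
 */
#if 0
#include <stdio.h>

static int md_grow_to_max(void)
{
	FILE *f = fopen("/sys/block/md0/md/component_size", "w");

	if (!f)
		return -1;
	fputs("0\n", f);	/* 0 == largest size that fits */
	return fclose(f);
}
#endif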
2789 * 'none' for arrays with no metadata (good luck...)
2790 * 'external' for arrays with externally managed metadata,
2791 * or N.M for internally known formats
2794 metadata_show(mddev_t *mddev, char *page)
2796 if (mddev->persistent)
2797 return sprintf(page, "%d.%d\n",
2798 mddev->major_version, mddev->minor_version);
2799 else if (mddev->external)
2800 return sprintf(page, "external:%s\n", mddev->metadata_type);
2802 return sprintf(page, "none\n");
2806 metadata_store(mddev_t *mddev, const char *buf, size_t len)
2810 if (!list_empty(&mddev->disks))
2813 if (cmd_match(buf, "none")) {
2814 mddev->persistent = 0;
2815 mddev->external = 0;
2816 mddev->major_version = 0;
2817 mddev->minor_version = 90;
2820 if (strncmp(buf, "external:", 9) == 0) {
2821 size_t namelen = len-9;
2822 if (namelen >= sizeof(mddev->metadata_type))
2823 namelen = sizeof(mddev->metadata_type)-1;
2824 strncpy(mddev->metadata_type, buf+9, namelen);
2825 mddev->metadata_type[namelen] = 0;
2826 if (namelen && mddev->metadata_type[namelen-1] == '\n')
2827 mddev->metadata_type[--namelen] = 0;
2828 mddev->persistent = 0;
2829 mddev->external = 1;
2830 mddev->major_version = 0;
2831 mddev->minor_version = 90;
2834 major = simple_strtoul(buf, &e, 10);
2835 if (e==buf || *e != '.')
2838 minor = simple_strtoul(buf, &e, 10);
2839 if (e==buf || (*e && *e != '\n') )
2841 if (major >= ARRAY_SIZE(super_types) || super_types[major].name == NULL)
2843 mddev->major_version = major;
2844 mddev->minor_version = minor;
2845 mddev->persistent = 1;
2846 mddev->external = 0;
2850 static struct md_sysfs_entry md_metadata =
2851 __ATTR(metadata_version, S_IRUGO|S_IWUSR, metadata_show, metadata_store);
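/*
 * Usage note (example, not part of md.c): metadata_version accepts the
 * three spellings handled above -- "none", "external:<name>", or the
 * internal "major.minor" form -- and only while the array has no member
 * disks yet, e.g.
 *
 *	echo 0.90 > /sys/block/md0/md/metadata_version
 *	echo external:somename > /sys/block/md0/md/metadata_version
 *
 * ("somename" is a placeholder for whatever manages the metadata).
 */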
2854 action_show(mddev_t *mddev, char *page)
2856 char *type = "idle";
2857 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
2858 (!mddev->ro && test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))) {
2859 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
2861 else if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
2862 if (!test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
2864 else if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery))
2871 return sprintf(page, "%s\n", type);
2875 action_store(mddev_t *mddev, const char *page, size_t len)
2877 if (!mddev->pers || !mddev->pers->sync_request)
2880 if (cmd_match(page, "idle")) {
2881 if (mddev->sync_thread) {
2882 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
2883 md_unregister_thread(mddev->sync_thread);
2884 mddev->sync_thread = NULL;
2885 mddev->recovery = 0;
2887 } else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
2888 test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
2890 else if (cmd_match(page, "resync") || cmd_match(page, "recover"))
2891 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
2892 else if (cmd_match(page, "reshape")) {
2894 if (mddev->pers->start_reshape == NULL)
2896 err = mddev->pers->start_reshape(mddev);
2900 if (cmd_match(page, "check"))
2901 set_bit(MD_RECOVERY_CHECK, &mddev->recovery);
2902 else if (!cmd_match(page, "repair"))
2904 set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
2905 set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
2907 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
2908 md_wakeup_thread(mddev->thread);
2913 mismatch_cnt_show(mddev_t *mddev, char *page)
2915 return sprintf(page, "%llu\n",
2916 (unsigned long long) mddev->resync_mismatches);
2919 static struct md_sysfs_entry md_scan_mode =
2920 __ATTR(sync_action, S_IRUGO|S_IWUSR, action_show, action_store);
2923 static struct md_sysfs_entry md_mismatches = __ATTR_RO(mismatch_cnt);
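/*
 * Illustrative sketch, not part of md.c: a scrub cycle as implied by
 * action_store()/mismatch_cnt above -- write "check", wait for
 * sync_action to drop back to "idle", then read mismatch_cnt (paths and
 * helper name are assumptions; a real tool would poll, not sleep):
 */
#if 0
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

static long md_scrub(void)
{
	char buf[64];
	FILE *f = fopen("/sys/block/md0/md/sync_action", "w");

	if (!f)
		return -1;
	fputs("check\n", f);
	if (fclose(f))
		return -1;
	do {			/* wait for the check to finish */
		sleep(5);
		f = fopen("/sys/block/md0/md/sync_action", "r");
		if (!f || !fgets(buf, sizeof(buf), f)) {
			if (f)
				fclose(f);
			return -1;
		}
		fclose(f);
	} while (strncmp(buf, "idle", 4));
	f = fopen("/sys/block/md0/md/mismatch_cnt", "r");
	if (!f || !fgets(buf, sizeof(buf), f)) {
		if (f)
			fclose(f);
		return -1;
	}
	fclose(f);
	return atol(buf);	/* sectors that did not agree */
}
#endif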
2926 sync_min_show(mddev_t *mddev, char *page)
2928 return sprintf(page, "%d (%s)\n", speed_min(mddev),
2929 mddev->sync_speed_min ? "local": "system");
2933 sync_min_store(mddev_t *mddev, const char *buf, size_t len)
2937 if (strncmp(buf, "system", 6)==0) {
2938 mddev->sync_speed_min = 0;
2941 min = simple_strtoul(buf, &e, 10);
2942 if (buf == e || (*e && *e != '\n') || min <= 0)
2944 mddev->sync_speed_min = min;
2948 static struct md_sysfs_entry md_sync_min =
2949 __ATTR(sync_speed_min, S_IRUGO|S_IWUSR, sync_min_show, sync_min_store);
2952 sync_max_show(mddev_t *mddev, char *page)
2954 return sprintf(page, "%d (%s)\n", speed_max(mddev),
2955 mddev->sync_speed_max ? "local": "system");
2959 sync_max_store(mddev_t *mddev, const char *buf, size_t len)
2963 if (strncmp(buf, "system", 6)==0) {
2964 mddev->sync_speed_max = 0;
2967 max = simple_strtoul(buf, &e, 10);
2968 if (buf == e || (*e && *e != '\n') || max <= 0)
2970 mddev->sync_speed_max = max;
2974 static struct md_sysfs_entry md_sync_max =
2975 __ATTR(sync_speed_max, S_IRUGO|S_IWUSR, sync_max_show, sync_max_store);
2978 degraded_show(mddev_t *mddev, char *page)
2980 return sprintf(page, "%d\n", mddev->degraded);
2982 static struct md_sysfs_entry md_degraded = __ATTR_RO(degraded);
2985 sync_speed_show(mddev_t *mddev, char *page)
2987 unsigned long resync, dt, db;
2988 resync = (mddev->curr_mark_cnt - atomic_read(&mddev->recovery_active));
2989 dt = ((jiffies - mddev->resync_mark) / HZ);
2991 db = resync - (mddev->resync_mark_cnt);
2992 return sprintf(page, "%ld\n", db/dt/2); /* K/sec */
2995 static struct md_sysfs_entry md_sync_speed = __ATTR_RO(sync_speed);
2998 sync_completed_show(mddev_t *mddev, char *page)
3000 unsigned long max_blocks, resync;
3002 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
3003 max_blocks = mddev->resync_max_sectors;
3005 max_blocks = mddev->size << 1;
3007 resync = (mddev->curr_resync - atomic_read(&mddev->recovery_active));
3008 return sprintf(page, "%lu / %lu\n", resync, max_blocks);
3011 static struct md_sysfs_entry md_sync_completed = __ATTR_RO(sync_completed);
3014 max_sync_show(mddev_t *mddev, char *page)
3016 if (mddev->resync_max == MaxSector)
3017 return sprintf(page, "max\n");
3019 return sprintf(page, "%llu\n",
3020 (unsigned long long)mddev->resync_max);
3023 max_sync_store(mddev_t *mddev, const char *buf, size_t len)
3025 if (strncmp(buf, "max", 3) == 0)
3026 mddev->resync_max = MaxSector;
3029 unsigned long long max = simple_strtoull(buf, &ep, 10);
3030 if (ep == buf || (*ep != 0 && *ep != '\n'))
3032 if (max < mddev->resync_max &&
3033 test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
3036 /* Must be a multiple of chunk_size */
3037 if (mddev->chunk_size) {
3038 if (max & (sector_t)((mddev->chunk_size>>9)-1))
3041 mddev->resync_max = max;
3043 wake_up(&mddev->recovery_wait);
3047 static struct md_sysfs_entry md_max_sync =
3048 __ATTR(sync_max, S_IRUGO|S_IWUSR, max_sync_show, max_sync_store);
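/*
 * Worked example for the check above: sync_max must be "max" or a
 * multiple of the chunk size in sectors.  With 64KiB chunks
 * (chunk_size >> 9 == 128 sectors), an arbitrary target is rounded
 * down with
 *
 *	valid = want & ~((sector_t)(chunk_size >> 9) - 1);
 *
 * e.g. want = 1000000 gives valid = 999936 (7812 whole chunks).
 */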
3051 suspend_lo_show(mddev_t *mddev, char *page)
3053 return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_lo);
3057 suspend_lo_store(mddev_t *mddev, const char *buf, size_t len)
3060 unsigned long long new = simple_strtoull(buf, &e, 10);
3062 if (mddev->pers->quiesce == NULL)
3064 if (buf == e || (*e && *e != '\n'))
3066 if (new >= mddev->suspend_hi ||
3067 (new > mddev->suspend_lo && new < mddev->suspend_hi)) {
3068 mddev->suspend_lo = new;
3069 mddev->pers->quiesce(mddev, 2);
3074 static struct md_sysfs_entry md_suspend_lo =
3075 __ATTR(suspend_lo, S_IRUGO|S_IWUSR, suspend_lo_show, suspend_lo_store);
3079 suspend_hi_show(mddev_t *mddev, char *page)
3081 return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_hi);
3085 suspend_hi_store(mddev_t *mddev, const char *buf, size_t len)
3088 unsigned long long new = simple_strtoull(buf, &e, 10);
3090 if (mddev->pers->quiesce == NULL)
3092 if (buf == e || (*e && *e != '\n'))
3094 if ((new <= mddev->suspend_lo && mddev->suspend_lo >= mddev->suspend_hi) ||
3095 (new > mddev->suspend_lo && new > mddev->suspend_hi)) {
3096 mddev->suspend_hi = new;
3097 mddev->pers->quiesce(mddev, 1);
3098 mddev->pers->quiesce(mddev, 0);
3103 static struct md_sysfs_entry md_suspend_hi =
3104 __ATTR(suspend_hi, S_IRUGO|S_IWUSR, suspend_hi_show, suspend_hi_store);
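/*
 * Illustrative sketch, not part of md.c: suspend_lo/suspend_hi describe
 * a sector range in which the personality blocks new requests.  The two
 * stores above only accept movements that keep the window sane, so the
 * usual order is to raise suspend_hi first and then pull suspend_lo up
 * behind it (path and helpers are assumptions for the example):
 */
#if 0
#include <stdio.h>

static int md_set_u64(const char *attr, unsigned long long v)
{
	char path[128];
	FILE *f;

	snprintf(path, sizeof(path), "/sys/block/md0/md/%s", attr);
	f = fopen(path, "w");
	if (!f)
		return -1;
	fprintf(f, "%llu\n", v);
	return fclose(f);
}

/* suspend I/O to sectors [1024, 2048) */
static int md_suspend_window(void)
{
	if (md_set_u64("suspend_hi", 2048))
		return -1;
	return md_set_u64("suspend_lo", 1024);
}
#endif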
3107 reshape_position_show(mddev_t *mddev, char *page)
3109 if (mddev->reshape_position != MaxSector)
3110 return sprintf(page, "%llu\n",
3111 (unsigned long long)mddev->reshape_position);
3112 strcpy(page, "none\n");
3117 reshape_position_store(mddev_t *mddev, const char *buf, size_t len)
3120 unsigned long long new = simple_strtoull(buf, &e, 10);
3123 if (buf == e || (*e && *e != '\n'))
3125 mddev->reshape_position = new;
3126 mddev->delta_disks = 0;
3127 mddev->new_level = mddev->level;
3128 mddev->new_layout = mddev->layout;
3129 mddev->new_chunk = mddev->chunk_size;
3133 static struct md_sysfs_entry md_reshape_position =
3134 __ATTR(reshape_position, S_IRUGO|S_IWUSR, reshape_position_show,
3135 reshape_position_store);
3138 static struct attribute *md_default_attrs[] = {
3141 &md_raid_disks.attr,
3142 &md_chunk_size.attr,
3144 &md_resync_start.attr,
3146 &md_new_device.attr,
3147 &md_safe_delay.attr,
3148 &md_array_state.attr,
3149 &md_reshape_position.attr,
3153 static struct attribute *md_redundancy_attrs[] = {
3155 &md_mismatches.attr,
3158 &md_sync_speed.attr,
3159 &md_sync_completed.attr,
3161 &md_suspend_lo.attr,
3162 &md_suspend_hi.attr,
3167 static struct attribute_group md_redundancy_group = {
3169 .attrs = md_redundancy_attrs,
3174 md_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
3176 struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr);
3177 mddev_t *mddev = container_of(kobj, struct mddev_s, kobj);
3182 rv = mddev_lock(mddev);
3184 rv = entry->show(mddev, page);
3185 mddev_unlock(mddev);
3191 md_attr_store(struct kobject *kobj, struct attribute *attr,
3192 const char *page, size_t length)
3194 struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr);
3195 mddev_t *mddev = container_of(kobj, struct mddev_s, kobj);
3200 if (!capable(CAP_SYS_ADMIN))
3202 rv = mddev_lock(mddev);
3204 rv = entry->store(mddev, page, length);
3205 mddev_unlock(mddev);
3210 static void md_free(struct kobject *ko)
3212 mddev_t *mddev = container_of(ko, mddev_t, kobj);
3216 static struct sysfs_ops md_sysfs_ops = {
3217 .show = md_attr_show,
3218 .store = md_attr_store,
3220 static struct kobj_type md_ktype = {
3222 .sysfs_ops = &md_sysfs_ops,
3223 .default_attrs = md_default_attrs,
3228 static struct kobject *md_probe(dev_t dev, int *part, void *data)
3230 static DEFINE_MUTEX(disks_mutex);
3231 mddev_t *mddev = mddev_find(dev);
3232 struct gendisk *disk;
3233 int partitioned = (MAJOR(dev) != MD_MAJOR);
3234 int shift = partitioned ? MdpMinorShift : 0;
3235 int unit = MINOR(dev) >> shift;
3241 mutex_lock(&disks_mutex);
3242 if (mddev->gendisk) {
3243 mutex_unlock(&disks_mutex);
3247 disk = alloc_disk(1 << shift);
3249 mutex_unlock(&disks_mutex);
3253 disk->major = MAJOR(dev);
3254 disk->first_minor = unit << shift;
3256 sprintf(disk->disk_name, "md_d%d", unit);
3258 sprintf(disk->disk_name, "md%d", unit);
3259 disk->fops = &md_fops;
3260 disk->private_data = mddev;
3261 disk->queue = mddev->queue;
3263 mddev->gendisk = disk;
3264 mutex_unlock(&disks_mutex);
3265 error = kobject_init_and_add(&mddev->kobj, &md_ktype, &disk->dev.kobj,
3268 printk(KERN_WARNING "md: cannot register %s/md - name in use\n",
3271 kobject_uevent(&mddev->kobj, KOBJ_ADD);
3275 static void md_safemode_timeout(unsigned long data)
3277 mddev_t *mddev = (mddev_t *) data;
3279 mddev->safemode = 1;
3280 md_wakeup_thread(mddev->thread);
3283 static int start_dirty_degraded;
3285 static int do_md_run(mddev_t * mddev)
3289 struct list_head *tmp;
3291 struct gendisk *disk;
3292 struct mdk_personality *pers;
3293 char b[BDEVNAME_SIZE];
3295 if (list_empty(&mddev->disks))
3296 /* cannot run an array with no devices.. */
3303 * Analyze all RAID superblock(s)
3305 if (!mddev->raid_disks) {
3306 if (!mddev->persistent)
3311 chunk_size = mddev->chunk_size;
3314 if (chunk_size > MAX_CHUNK_SIZE) {
3315 printk(KERN_ERR "too big chunk_size: %d > %d\n",
3316 chunk_size, MAX_CHUNK_SIZE);
3320 * chunk-size has to be a power of 2 and a multiple of PAGE_SIZE
3322 if ( (1 << ffz(~chunk_size)) != chunk_size) {
3323 printk(KERN_ERR "chunk_size of %d not valid\n", chunk_size);
3326 if (chunk_size < PAGE_SIZE) {
3327 printk(KERN_ERR "too small chunk_size: %d < %ld\n",
3328 chunk_size, PAGE_SIZE);
3332 /* devices must have minimum size of one chunk */
3333 rdev_for_each(rdev, tmp, mddev) {
3334 if (test_bit(Faulty, &rdev->flags))
3336 if (rdev->size < chunk_size / 1024) {
3338 "md: Dev %s smaller than chunk_size:"
3340 bdevname(rdev->bdev,b),
3341 (unsigned long long)rdev->size,
3349 if (mddev->level != LEVEL_NONE)
3350 request_module("md-level-%d", mddev->level);
3351 else if (mddev->clevel[0])
3352 request_module("md-%s", mddev->clevel);
3356 * Drop all container device buffers, from now on
3357 * the only valid external interface is through the md
3358 * device.
3359 */
3360 rdev_for_each(rdev, tmp, mddev) {
3361 if (test_bit(Faulty, &rdev->flags))
3363 sync_blockdev(rdev->bdev);
3364 invalidate_bdev(rdev->bdev);
3366 /* perform some consistency tests on the device.
3367 * We don't want the data to overlap the metadata.
3368 * Internal bitmap issues are handled elsewhere.
3370 if (rdev->data_offset < rdev->sb_offset) {
3372 rdev->data_offset + mddev->size*2
3373 > rdev->sb_offset*2) {
3374 printk("md: %s: data overlaps metadata\n",
3379 if (rdev->sb_offset*2 + rdev->sb_size/512
3380 > rdev->data_offset) {
3381 printk("md: %s: metadata overlaps data\n",
3388 md_probe(mddev->unit, NULL, NULL);
3389 disk = mddev->gendisk;
3393 spin_lock(&pers_lock);
3394 pers = find_pers(mddev->level, mddev->clevel);
3395 if (!pers || !try_module_get(pers->owner)) {
3396 spin_unlock(&pers_lock);
3397 if (mddev->level != LEVEL_NONE)
3398 printk(KERN_WARNING "md: personality for level %d is not loaded!\n",
3401 printk(KERN_WARNING "md: personality for level %s is not loaded!\n",
3406 spin_unlock(&pers_lock);
3407 mddev->level = pers->level;
3408 strlcpy(mddev->clevel, pers->name, sizeof(mddev->clevel));
3410 if (mddev->reshape_position != MaxSector &&
3411 pers->start_reshape == NULL) {
3412 /* This personality cannot handle reshaping... */
3414 module_put(pers->owner);
3418 if (pers->sync_request) {
3419 /* Warn if this is a potentially silly
3420 * configuration.
3421 */
3422 char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
3424 struct list_head *tmp2;
3426 rdev_for_each(rdev, tmp, mddev) {
3427 rdev_for_each(rdev2, tmp2, mddev) {
3429 rdev->bdev->bd_contains ==
3430 rdev2->bdev->bd_contains) {
3432 "%s: WARNING: %s appears to be"
3433 " on the same physical disk as"
3436 bdevname(rdev->bdev,b),
3437 bdevname(rdev2->bdev,b2));
3444 "True protection against single-disk"
3445 " failure might be compromised.\n");
3448 mddev->recovery = 0;
3449 mddev->resync_max_sectors = mddev->size << 1; /* may be over-ridden by personality */
3450 mddev->barriers_work = 1;
3451 mddev->ok_start_degraded = start_dirty_degraded;
3454 mddev->ro = 2; /* read-only, but switch on first write */
3456 err = mddev->pers->run(mddev);
3457 if (!err && mddev->pers->sync_request) {
3458 err = bitmap_create(mddev);
3460 printk(KERN_ERR "%s: failed to create bitmap (%d)\n",
3461 mdname(mddev), err);
3462 mddev->pers->stop(mddev);
3466 printk(KERN_ERR "md: pers->run() failed ...\n");
3467 module_put(mddev->pers->owner);
3469 bitmap_destroy(mddev);
3472 if (mddev->pers->sync_request) {
3473 if (sysfs_create_group(&mddev->kobj, &md_redundancy_group))
3475 "md: cannot register extra attributes for %s\n",
3477 } else if (mddev->ro == 2) /* auto-readonly not meaningful */
3480 atomic_set(&mddev->writes_pending,0);
3481 mddev->safemode = 0;
3482 mddev->safemode_timer.function = md_safemode_timeout;
3483 mddev->safemode_timer.data = (unsigned long) mddev;
3484 mddev->safemode_delay = (200 * HZ)/1000 +1; /* 200 msec delay */
3487 rdev_for_each(rdev, tmp, mddev)
3488 if (rdev->raid_disk >= 0) {
3490 sprintf(nm, "rd%d", rdev->raid_disk);
3491 if (sysfs_create_link(&mddev->kobj, &rdev->kobj, nm))
3492 printk("md: cannot register %s for %s\n",
3496 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
3499 md_update_sb(mddev, 0);
3501 set_capacity(disk, mddev->array_size<<1);
3503 /* If we call blk_queue_make_request here, it will
3504 * re-initialise max_sectors etc which may have been
3505 * refined inside -> run. So just set the bits we need to set.
3506 * Most initialisation happened when we called
3507 * blk_queue_make_request(..., md_fail_request)
3510 mddev->queue->queuedata = mddev;
3511 mddev->queue->make_request_fn = mddev->pers->make_request;
3513 /* If there is a partially-recovered drive we need to
3514 * start recovery here. If we leave it to md_check_recovery,
3515 * it will remove the drives and not do the right thing
3517 if (mddev->degraded && !mddev->sync_thread) {
3518 struct list_head *rtmp;
3520 rdev_for_each(rdev, rtmp, mddev)
3521 if (rdev->raid_disk >= 0 &&
3522 !test_bit(In_sync, &rdev->flags) &&
3523 !test_bit(Faulty, &rdev->flags))
3524 /* complete an interrupted recovery */
3526 if (spares && mddev->pers->sync_request) {
3527 mddev->recovery = 0;
3528 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
3529 mddev->sync_thread = md_register_thread(md_do_sync,
3532 if (!mddev->sync_thread) {
3533 printk(KERN_ERR "%s: could not start resync"
3536 /* leave the spares where they are, it shouldn't hurt */
3537 mddev->recovery = 0;
3541 md_wakeup_thread(mddev->thread);
3542 md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */
3545 md_new_event(mddev);
3546 kobject_uevent(&mddev->gendisk->dev.kobj, KOBJ_CHANGE);
3550 static int restart_array(mddev_t *mddev)
3552 struct gendisk *disk = mddev->gendisk;
3556 * Complain if it has no devices
3559 if (list_empty(&mddev->disks))
3567 mddev->safemode = 0;
3569 set_disk_ro(disk, 0);
3571 printk(KERN_INFO "md: %s switched to read-write mode.\n",
3574 * Kick recovery or resync if necessary
3576 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
3577 md_wakeup_thread(mddev->thread);
3578 md_wakeup_thread(mddev->sync_thread);
3587 /* similar to deny_write_access, but accounts for our holding a reference
3588 * to the file ourselves */
3589 static int deny_bitmap_write_access(struct file * file)
3591 struct inode *inode = file->f_mapping->host;
3593 spin_lock(&inode->i_lock);
3594 if (atomic_read(&inode->i_writecount) > 1) {
3595 spin_unlock(&inode->i_lock);
3598 atomic_set(&inode->i_writecount, -1);
3599 spin_unlock(&inode->i_lock);
3604 static void restore_bitmap_write_access(struct file *file)
3606 struct inode *inode = file->f_mapping->host;
3608 spin_lock(&inode->i_lock);
3609 atomic_set(&inode->i_writecount, 1);
3610 spin_unlock(&inode->i_lock);
3613 /* mode:
3614 *   0 - completely stop and dis-assemble array
3615 *   1 - switch to readonly
3616 *   2 - stop but do not disassemble array
3617 */
3618 static int do_md_stop(mddev_t * mddev, int mode)
3621 struct gendisk *disk = mddev->gendisk;
3624 if (atomic_read(&mddev->active)>2) {
3625 printk("md: %s still in use.\n",mdname(mddev));
3629 if (mddev->sync_thread) {
3630 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
3631 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
3632 md_unregister_thread(mddev->sync_thread);
3633 mddev->sync_thread = NULL;
3636 del_timer_sync(&mddev->safemode_timer);
3638 invalidate_partition(disk, 0);
3641 case 1: /* readonly */
3647 case 0: /* disassemble */
3649 bitmap_flush(mddev);
3650 md_super_wait(mddev);
3652 set_disk_ro(disk, 0);
3653 blk_queue_make_request(mddev->queue, md_fail_request);
3654 mddev->pers->stop(mddev);
3655 mddev->queue->merge_bvec_fn = NULL;
3656 mddev->queue->unplug_fn = NULL;
3657 mddev->queue->backing_dev_info.congested_fn = NULL;
3658 if (mddev->pers->sync_request)
3659 sysfs_remove_group(&mddev->kobj, &md_redundancy_group);
3661 module_put(mddev->pers->owner);
3664 set_capacity(disk, 0);
3670 if (!mddev->in_sync || mddev->flags) {
3671 /* mark array as cleanly shut down */
3673 md_update_sb(mddev, 1);
3676 set_disk_ro(disk, 1);
3677 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
3681 * Free resources if final stop
3685 struct list_head *tmp;
3687 printk(KERN_INFO "md: %s stopped.\n", mdname(mddev));
3689 bitmap_destroy(mddev);
3690 if (mddev->bitmap_file) {
3691 restore_bitmap_write_access(mddev->bitmap_file);
3692 fput(mddev->bitmap_file);
3693 mddev->bitmap_file = NULL;
3695 mddev->bitmap_offset = 0;
3697 rdev_for_each(rdev, tmp, mddev)
3698 if (rdev->raid_disk >= 0) {
3700 sprintf(nm, "rd%d", rdev->raid_disk);
3701 sysfs_remove_link(&mddev->kobj, nm);
3704 /* make sure all md_delayed_delete calls have finished */
3705 flush_scheduled_work();
3707 export_array(mddev);
3709 mddev->array_size = 0;
3711 mddev->raid_disks = 0;
3712 mddev->recovery_cp = 0;
3713 mddev->resync_max = MaxSector;
3714 mddev->reshape_position = MaxSector;
3715 mddev->external = 0;
3716 mddev->persistent = 0;
3718 } else if (mddev->pers)
3719 printk(KERN_INFO "md: %s switched to read-only mode.\n",
3722 md_new_event(mddev);
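/*
 * Illustrative sketch, not part of md.c: the three modes above map onto
 * the classic ioctls -- STOP_ARRAY requests mode 0 (full disassemble)
 * and STOP_ARRAY_RO mode 1 (readonly), while mode 2 is reached by
 * writing "inactive" to array_state.  Hypothetical userspace use:
 */
#if 0
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/raid/md_u.h>	/* STOP_ARRAY, STOP_ARRAY_RO */

static int md_stop(const char *dev, int readonly)
{
	int fd = open(dev, O_RDONLY);
	int err;

	if (fd < 0)
		return -1;
	err = ioctl(fd, readonly ? STOP_ARRAY_RO : STOP_ARRAY, 0);
	close(fd);
	return err;
}
#endif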
3728 static void autorun_array(mddev_t *mddev)
3731 struct list_head *tmp;
3734 if (list_empty(&mddev->disks))
3737 printk(KERN_INFO "md: running: ");
3739 rdev_for_each(rdev, tmp, mddev) {
3740 char b[BDEVNAME_SIZE];
3741 printk("<%s>", bdevname(rdev->bdev,b));
3745 err = do_md_run (mddev);
3747 printk(KERN_WARNING "md: do_md_run() returned %d\n", err);
3748 do_md_stop (mddev, 0);
3753 * let's try to run arrays based on all disks that have arrived
3754 * until now. (those are in pending_raid_disks)
3756 * the method: pick the first pending disk, collect all disks with
3757 * the same UUID, remove all from the pending list and put them into
3758 * the 'same_array' list. Then order this list based on superblock
3759 * update time (freshest comes first), kick out 'old' disks and
3760 * compare superblocks. If everything's fine then run it.
3762 * If "unit" is allocated, then bump its reference count
3764 static void autorun_devices(int part)
3766 struct list_head *tmp;
3767 mdk_rdev_t *rdev0, *rdev;
3769 char b[BDEVNAME_SIZE];
3771 printk(KERN_INFO "md: autorun ...\n");
3772 while (!list_empty(&pending_raid_disks)) {
3775 LIST_HEAD(candidates);
3776 rdev0 = list_entry(pending_raid_disks.next,
3777 mdk_rdev_t, same_set);
3779 printk(KERN_INFO "md: considering %s ...\n",
3780 bdevname(rdev0->bdev,b));
3781 INIT_LIST_HEAD(&candidates);
3782 rdev_for_each_list(rdev, tmp, pending_raid_disks)
3783 if (super_90_load(rdev, rdev0, 0) >= 0) {
3784 printk(KERN_INFO "md: adding %s ...\n",
3785 bdevname(rdev->bdev,b));
3786 list_move(&rdev->same_set, &candidates);
3789 * now we have a set of devices, with all of them having
3790 * mostly sane superblocks. It's time to allocate the
3794 dev = MKDEV(mdp_major,
3795 rdev0->preferred_minor << MdpMinorShift);
3796 unit = MINOR(dev) >> MdpMinorShift;
3798 dev = MKDEV(MD_MAJOR, rdev0->preferred_minor);
3801 if (rdev0->preferred_minor != unit) {
3802 printk(KERN_INFO "md: unit number in %s is bad: %d\n",
3803 bdevname(rdev0->bdev, b), rdev0->preferred_minor);
3807 md_probe(dev, NULL, NULL);
3808 mddev = mddev_find(dev);
3811 "md: cannot allocate memory for md drive.\n");
3814 if (mddev_lock(mddev))
3815 printk(KERN_WARNING "md: %s locked, cannot run\n",
3817 else if (mddev->raid_disks || mddev->major_version
3818 || !list_empty(&mddev->disks)) {
3820 "md: %s already running, cannot run %s\n",
3821 mdname(mddev), bdevname(rdev0->bdev,b));
3822 mddev_unlock(mddev);
3824 printk(KERN_INFO "md: created %s\n", mdname(mddev));
3825 mddev->persistent = 1;
3826 rdev_for_each_list(rdev, tmp, candidates) {
3827 list_del_init(&rdev->same_set);
3828 if (bind_rdev_to_array(rdev, mddev))
3831 autorun_array(mddev);
3832 mddev_unlock(mddev);
3834 /* on success, candidates will be empty, on error
3837 rdev_for_each_list(rdev, tmp, candidates)
3841 printk(KERN_INFO "md: ... autorun DONE.\n");
3843 #endif /* !MODULE */
3845 static int get_version(void __user * arg)
3849 ver.major = MD_MAJOR_VERSION;
3850 ver.minor = MD_MINOR_VERSION;
3851 ver.patchlevel = MD_PATCHLEVEL_VERSION;
3853 if (copy_to_user(arg, &ver, sizeof(ver)))
3859 static int get_array_info(mddev_t * mddev, void __user * arg)
3861 mdu_array_info_t info;
3862 int nr,working,active,failed,spare;
3864 struct list_head *tmp;
3866 nr=working=active=failed=spare=0;
3867 rdev_for_each(rdev, tmp, mddev) {
3869 if (test_bit(Faulty, &rdev->flags))
3873 if (test_bit(In_sync, &rdev->flags))
3880 info.major_version = mddev->major_version;
3881 info.minor_version = mddev->minor_version;
3882 info.patch_version = MD_PATCHLEVEL_VERSION;
3883 info.ctime = mddev->ctime;
3884 info.level = mddev->level;
3885 info.size = mddev->size;
3886 if (info.size != mddev->size) /* overflow */
3889 info.raid_disks = mddev->raid_disks;
3890 info.md_minor = mddev->md_minor;
3891 info.not_persistent= !mddev->persistent;
3893 info.utime = mddev->utime;
3896 info.state = (1<<MD_SB_CLEAN);
3897 if (mddev->bitmap && mddev->bitmap_offset)
3898 info.state = (1<<MD_SB_BITMAP_PRESENT);
3899 info.active_disks = active;
3900 info.working_disks = working;
3901 info.failed_disks = failed;
3902 info.spare_disks = spare;
3904 info.layout = mddev->layout;
3905 info.chunk_size = mddev->chunk_size;
3907 if (copy_to_user(arg, &info, sizeof(info)))
3913 static int get_bitmap_file(mddev_t * mddev, void __user * arg)
3915 mdu_bitmap_file_t *file = NULL; /* too big for stack allocation */
3916 char *ptr, *buf = NULL;
3919 md_allow_write(mddev);
3921 file = kmalloc(sizeof(*file), GFP_KERNEL);
3925 /* bitmap disabled, zero the first byte and copy out */
3926 if (!mddev->bitmap || !mddev->bitmap->file) {
3927 file->pathname[0] = '\0';
3931 buf = kmalloc(sizeof(file->pathname), GFP_KERNEL);
3935 ptr = file_path(mddev->bitmap->file, buf, sizeof(file->pathname));
3939 strcpy(file->pathname, ptr);
3943 if (copy_to_user(arg, file, sizeof(*file)))
3951 static int get_disk_info(mddev_t * mddev, void __user * arg)
3953 mdu_disk_info_t info;
3957 if (copy_from_user(&info, arg, sizeof(info)))
3962 rdev = find_rdev_nr(mddev, nr);
3964 info.major = MAJOR(rdev->bdev->bd_dev);
3965 info.minor = MINOR(rdev->bdev->bd_dev);
3966 info.raid_disk = rdev->raid_disk;
3968 if (test_bit(Faulty, &rdev->flags))
3969 info.state |= (1<<MD_DISK_FAULTY);
3970 else if (test_bit(In_sync, &rdev->flags)) {
3971 info.state |= (1<<MD_DISK_ACTIVE);
3972 info.state |= (1<<MD_DISK_SYNC);
3974 if (test_bit(WriteMostly, &rdev->flags))
3975 info.state |= (1<<MD_DISK_WRITEMOSTLY);
3977 info.major = info.minor = 0;
3978 info.raid_disk = -1;
3979 info.state = (1<<MD_DISK_REMOVED);
3982 if (copy_to_user(arg, &info, sizeof(info)))
3988 static int add_new_disk(mddev_t * mddev, mdu_disk_info_t *info)
3990 char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
3992 dev_t dev = MKDEV(info->major,info->minor);
3994 if (info->major != MAJOR(dev) || info->minor != MINOR(dev))
3997 if (!mddev->raid_disks) {
3999 /* expecting a device which has a superblock */
4000 rdev = md_import_device(dev, mddev->major_version, mddev->minor_version);
4003 "md: md_import_device returned %ld\n",
4005 return PTR_ERR(rdev);
4007 if (!list_empty(&mddev->disks)) {
4008 mdk_rdev_t *rdev0 = list_entry(mddev->disks.next,
4009 mdk_rdev_t, same_set);
4010 int err = super_types[mddev->major_version]
4011 .load_super(rdev, rdev0, mddev->minor_version);
4014 "md: %s has different UUID to %s\n",
4015 bdevname(rdev->bdev,b),
4016 bdevname(rdev0->bdev,b2));
4021 err = bind_rdev_to_array(rdev, mddev);
4028 * add_new_disk can be used once the array is assembled
4029 * to add "hot spares". They must already have a superblock
4034 if (!mddev->pers->hot_add_disk) {
4036 "%s: personality does not support diskops!\n",
4040 if (mddev->persistent)
4041 rdev = md_import_device(dev, mddev->major_version,
4042 mddev->minor_version);
4044 rdev = md_import_device(dev, -1, -1);
4047 "md: md_import_device returned %ld\n",
4049 return PTR_ERR(rdev);
4051 /* set saved_raid_disk if appropriate */
4052 if (!mddev->persistent) {
4053 if (info->state & (1<<MD_DISK_SYNC) &&
4054 info->raid_disk < mddev->raid_disks)
4055 rdev->raid_disk = info->raid_disk;
4057 rdev->raid_disk = -1;
4059 super_types[mddev->major_version].
4060 validate_super(mddev, rdev);
4061 rdev->saved_raid_disk = rdev->raid_disk;
4063 clear_bit(In_sync, &rdev->flags); /* just to be sure */
4064 if (info->state & (1<<MD_DISK_WRITEMOSTLY))
4065 set_bit(WriteMostly, &rdev->flags);
4067 rdev->raid_disk = -1;
4068 err = bind_rdev_to_array(rdev, mddev);
4069 if (!err && !mddev->pers->hot_remove_disk) {
4070 /* If there is hot_add_disk but no hot_remove_disk
4071 * then added disks are for geometry changes,
4072 * and should be added immediately.
4074 super_types[mddev->major_version].
4075 validate_super(mddev, rdev);
4076 err = mddev->pers->hot_add_disk(mddev, rdev);
4078 unbind_rdev_from_array(rdev);
4083 md_update_sb(mddev, 1);
4084 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
4085 md_wakeup_thread(mddev->thread);
4089 /* otherwise, add_new_disk is only allowed
4090 * for major_version==0 superblocks
4092 if (mddev->major_version != 0) {
4093 printk(KERN_WARNING "%s: ADD_NEW_DISK not supported\n",
4098 if (!(info->state & (1<<MD_DISK_FAULTY))) {
4100 rdev = md_import_device (dev, -1, 0);
4103 "md: error, md_import_device() returned %ld\n",
4105 return PTR_ERR(rdev);
4107 rdev->desc_nr = info->number;
4108 if (info->raid_disk < mddev->raid_disks)
4109 rdev->raid_disk = info->raid_disk;
4111 rdev->raid_disk = -1;
4113 if (rdev->raid_disk < mddev->raid_disks)
4114 if (info->state & (1<<MD_DISK_SYNC))
4115 set_bit(In_sync, &rdev->flags);
4117 if (info->state & (1<<MD_DISK_WRITEMOSTLY))
4118 set_bit(WriteMostly, &rdev->flags);
4120 if (!mddev->persistent) {
4121 printk(KERN_INFO "md: nonpersistent superblock ...\n");
4122 rdev->sb_offset = rdev->bdev->bd_inode->i_size >> BLOCK_SIZE_BITS;
4124 rdev->sb_offset = calc_dev_sboffset(rdev->bdev);
4125 rdev->size = calc_dev_size(rdev, mddev->chunk_size);
4127 err = bind_rdev_to_array(rdev, mddev);
4137 static int hot_remove_disk(mddev_t * mddev, dev_t dev)
4139 char b[BDEVNAME_SIZE];
4145 rdev = find_rdev(mddev, dev);
4149 if (rdev->raid_disk >= 0)
4152 kick_rdev_from_array(rdev);
4153 md_update_sb(mddev, 1);
4154 md_new_event(mddev);
4158 printk(KERN_WARNING "md: cannot remove active disk %s from %s ...\n",
4159 bdevname(rdev->bdev,b), mdname(mddev));
4163 static int hot_add_disk(mddev_t * mddev, dev_t dev)
4165 char b[BDEVNAME_SIZE];
4173 if (mddev->major_version != 0) {
4174 printk(KERN_WARNING "%s: HOT_ADD may only be used with"
4175 " version-0 superblocks.\n",
4179 if (!mddev->pers->hot_add_disk) {
4181 "%s: personality does not support diskops!\n",
4186 rdev = md_import_device (dev, -1, 0);
4189 "md: error, md_import_device() returned %ld\n",
4194 if (mddev->persistent)
4195 rdev->sb_offset = calc_dev_sboffset(rdev->bdev);
4198 rdev->bdev->bd_inode->i_size >> BLOCK_SIZE_BITS;
4200 size = calc_dev_size(rdev, mddev->chunk_size);
4203 if (test_bit(Faulty, &rdev->flags)) {
4205 "md: can not hot-add faulty %s disk to %s!\n",
4206 bdevname(rdev->bdev,b), mdname(mddev));
4210 clear_bit(In_sync, &rdev->flags);
4212 rdev->saved_raid_disk = -1;
4213 err = bind_rdev_to_array(rdev, mddev);
4218 * The rest had better be atomic; we can have disk failures
4219 * noticed in interrupt contexts ...
4222 if (rdev->desc_nr == mddev->max_disks) {
4223 printk(KERN_WARNING "%s: can not hot-add to full array!\n",
4226 goto abort_unbind_export;
4229 rdev->raid_disk = -1;
4231 md_update_sb(mddev, 1);
4234 * Kick recovery, maybe this spare has to be added to the
4235 * array immediately.
4237 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
4238 md_wakeup_thread(mddev->thread);
4239 md_new_event(mddev);
4242 abort_unbind_export:
4243 unbind_rdev_from_array(rdev);
4250 static int set_bitmap_file(mddev_t *mddev, int fd)
4255 if (!mddev->pers->quiesce)
4257 if (mddev->recovery || mddev->sync_thread)
4259 /* we should be able to change the bitmap.. */
4265 return -EEXIST; /* cannot add when bitmap is present */
4266 mddev->bitmap_file = fget(fd);
4268 if (mddev->bitmap_file == NULL) {
4269 printk(KERN_ERR "%s: error: failed to get bitmap file\n",
4274 err = deny_bitmap_write_access(mddev->bitmap_file);
4276 printk(KERN_ERR "%s: error: bitmap file is already in use\n",
4278 fput(mddev->bitmap_file);
4279 mddev->bitmap_file = NULL;
4282 mddev->bitmap_offset = 0; /* file overrides offset */
4283 } else if (mddev->bitmap == NULL)
4284 return -ENOENT; /* cannot remove what isn't there */
4287 mddev->pers->quiesce(mddev, 1);
4289 err = bitmap_create(mddev);
4290 if (fd < 0 || err) {
4291 bitmap_destroy(mddev);
4292 fd = -1; /* make sure to put the file */
4294 mddev->pers->quiesce(mddev, 0);
4297 if (mddev->bitmap_file) {
4298 restore_bitmap_write_access(mddev->bitmap_file);
4299 fput(mddev->bitmap_file);
4301 mddev->bitmap_file = NULL;
4308 * set_array_info is used in two different ways.
4309 * The original usage is when creating a new array.
4310 * In this usage, raid_disks is > 0 and it, together with
4311 * level, size, not_persistent, layout and chunksize, determines the
4312 * shape of the array.
4313 * This will always create an array with a type-0.90.0 superblock.
4314 * The newer usage is when assembling an array.
4315 * In this case raid_disks will be 0, and the major_version field is
4316 * used to determine which style of super-blocks are to be found on the devices.
4317 * The minor and patch _version numbers are also kept in case the
4318 * super_block handler wishes to interpret them.
4320 static int set_array_info(mddev_t * mddev, mdu_array_info_t *info)
4323 if (info->raid_disks == 0) {
4324 /* just setting version number for superblock loading */
4325 if (info->major_version < 0 ||
4326 info->major_version >= ARRAY_SIZE(super_types) ||
4327 super_types[info->major_version].name == NULL) {
4328 /* maybe try to auto-load a module? */
4330 "md: superblock version %d not known\n",
4331 info->major_version);
4334 mddev->major_version = info->major_version;
4335 mddev->minor_version = info->minor_version;
4336 mddev->patch_version = info->patch_version;
4337 mddev->persistent = !info->not_persistent;
4340 mddev->major_version = MD_MAJOR_VERSION;
4341 mddev->minor_version = MD_MINOR_VERSION;
4342 mddev->patch_version = MD_PATCHLEVEL_VERSION;
4343 mddev->ctime = get_seconds();
4345 mddev->level = info->level;
4346 mddev->clevel[0] = 0;
4347 mddev->size = info->size;
4348 mddev->raid_disks = info->raid_disks;
4349 /* don't set md_minor, it is determined by which /dev/md* was
4352 if (info->state & (1<<MD_SB_CLEAN))
4353 mddev->recovery_cp = MaxSector;
4355 mddev->recovery_cp = 0;
4356 mddev->persistent = ! info->not_persistent;
4357 mddev->external = 0;
4359 mddev->layout = info->layout;
4360 mddev->chunk_size = info->chunk_size;
4362 mddev->max_disks = MD_SB_DISKS;
4364 if (mddev->persistent)
4366 set_bit(MD_CHANGE_DEVS, &mddev->flags);
4368 mddev->default_bitmap_offset = MD_SB_BYTES >> 9;
4369 mddev->bitmap_offset = 0;
4371 mddev->reshape_position = MaxSector;
4374 * Generate a 128 bit UUID
4376 get_random_bytes(mddev->uuid, 16);
4378 mddev->new_level = mddev->level;
4379 mddev->new_chunk = mddev->chunk_size;
4380 mddev->new_layout = mddev->layout;
4381 mddev->delta_disks = 0;
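/*
 * Illustrative sketch, not part of md.c: the "create" flavour of
 * set_array_info described above, driven from userspace.  The device
 * path and geometry are made-up example values:
 */
#if 0
#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/raid/md_u.h>	/* mdu_array_info_t, SET_ARRAY_INFO */

static int md_create_raid1(const char *dev)
{
	mdu_array_info_t info;
	int fd = open(dev, O_RDONLY);
	int err;

	if (fd < 0)
		return -1;
	memset(&info, 0, sizeof(info));
	info.level = 1;		/* RAID1 */
	info.raid_disks = 2;	/* > 0 selects the "create" path */
	info.size = 0;		/* let the personality derive it */
	err = ioctl(fd, SET_ARRAY_INFO, &info);
	close(fd);
	return err;
}
#endif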
4386 static int update_size(mddev_t *mddev, unsigned long size)
4390 struct list_head *tmp;
4391 int fit = (size == 0);
4393 if (mddev->pers->resize == NULL)
4395 /* The "size" is the amount of each device that is used.
4396 * This can only make sense for arrays with redundancy.
4397 * linear and raid0 always use whatever space is available.
4398 * We can only consider changing the size if no resync
4399 * or reconstruction is happening, and if the new size
4400 * is acceptable. It must fit before the sb_offset or,
4401 * if that is <data_offset, it must fit before the
4402 * size of each device.
4403 * If size is zero, we find the largest size that fits.
4405 if (mddev->sync_thread)
4407 rdev_for_each(rdev, tmp, mddev) {
4409 avail = rdev->size * 2;
4411 if (fit && (size == 0 || size > avail/2))
4413 if (avail < ((sector_t)size << 1))
4416 rv = mddev->pers->resize(mddev, (sector_t)size *2);
4418 struct block_device *bdev;
4420 bdev = bdget_disk(mddev->gendisk, 0);
4422 mutex_lock(&bdev->bd_inode->i_mutex);
4423 i_size_write(bdev->bd_inode, (loff_t)mddev->array_size << 10);
4424 mutex_unlock(&bdev->bd_inode->i_mutex);
4431 static int update_raid_disks(mddev_t *mddev, int raid_disks)
4434 /* change the number of raid disks */
4435 if (mddev->pers->check_reshape == NULL)
4437 if (raid_disks <= 0 ||
4438 raid_disks >= mddev->max_disks)
4440 if (mddev->sync_thread || mddev->reshape_position != MaxSector)
4442 mddev->delta_disks = raid_disks - mddev->raid_disks;
4444 rv = mddev->pers->check_reshape(mddev);
4450 * update_array_info is used to change the configuration of an
4451 * active array.
4452 * The version, ctime, level, size, raid_disks, not_persistent,
4453 * layout and chunk_size fields in the info are checked against the array.
4454 * Any differences that cannot be handled will cause an error.
4455 * Normally, only one change can be managed at a time.
4457 static int update_array_info(mddev_t *mddev, mdu_array_info_t *info)
4463 /* calculate expected state, ignoring low bits */
4464 if (mddev->bitmap && mddev->bitmap_offset)
4465 state |= (1 << MD_SB_BITMAP_PRESENT);
4467 if (mddev->major_version != info->major_version ||
4468 mddev->minor_version != info->minor_version ||
4469 /* mddev->patch_version != info->patch_version || */
4470 mddev->ctime != info->ctime ||
4471 mddev->level != info->level ||
4472 /* mddev->layout != info->layout || */
4473 !mddev->persistent != info->not_persistent||
4474 mddev->chunk_size != info->chunk_size ||
4475 /* ignore bottom 8 bits of state, and allow SB_BITMAP_PRESENT to change */
4476 ((state^info->state) & 0xfffffe00)
4479 /* Check there is only one change */
4480 if (info->size >= 0 && mddev->size != info->size) cnt++;
4481 if (mddev->raid_disks != info->raid_disks) cnt++;
4482 if (mddev->layout != info->layout) cnt++;
4483 if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT)) cnt++;
4484 if (cnt == 0) return 0;
4485 if (cnt > 1) return -EINVAL;
4487 if (mddev->layout != info->layout) {
4489 * we don't need to do anything at the md level, the
4490 * personality will take care of it all.
4492 if (mddev->pers->reconfig == NULL)
4495 return mddev->pers->reconfig(mddev, info->layout, -1);
4497 if (info->size >= 0 && mddev->size != info->size)
4498 rv = update_size(mddev, info->size);
4500 if (mddev->raid_disks != info->raid_disks)
4501 rv = update_raid_disks(mddev, info->raid_disks);
4503 if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT)) {
4504 if (mddev->pers->quiesce == NULL)
4506 if (mddev->recovery || mddev->sync_thread)
4508 if (info->state & (1<<MD_SB_BITMAP_PRESENT)) {
4509 /* add the bitmap */
4512 if (mddev->default_bitmap_offset == 0)
4514 mddev->bitmap_offset = mddev->default_bitmap_offset;
4515 mddev->pers->quiesce(mddev, 1);
4516 rv = bitmap_create(mddev);
4518 bitmap_destroy(mddev);
4519 mddev->pers->quiesce(mddev, 0);
4521 /* remove the bitmap */
4524 if (mddev->bitmap->file)
4526 mddev->pers->quiesce(mddev, 1);
4527 bitmap_destroy(mddev);
4528 mddev->pers->quiesce(mddev, 0);
4529 mddev->bitmap_offset = 0;
4532 md_update_sb(mddev, 1);
4536 static int set_disk_faulty(mddev_t *mddev, dev_t dev)
4540 if (mddev->pers == NULL)
4543 rdev = find_rdev(mddev, dev);
4547 md_error(mddev, rdev);
4551 static int md_getgeo(struct block_device *bdev, struct hd_geometry *geo)
4553 mddev_t *mddev = bdev->bd_disk->private_data;
4557 geo->cylinders = get_capacity(mddev->gendisk) / 8;
4561 static int md_ioctl(struct inode *inode, struct file *file,
4562 unsigned int cmd, unsigned long arg)
4565 void __user *argp = (void __user *)arg;
4566 mddev_t *mddev = NULL;
4568 if (!capable(CAP_SYS_ADMIN))
4572 * Commands dealing with the RAID driver but not any
4578 err = get_version(argp);
4581 case PRINT_RAID_DEBUG:
4589 autostart_arrays(arg);
4596 * Commands creating/starting a new array:
4599 mddev = inode->i_bdev->bd_disk->private_data;
4606 err = mddev_lock(mddev);
4609 "md: ioctl lock interrupted, reason %d, cmd %d\n",
4616 case SET_ARRAY_INFO:
4618 mdu_array_info_t info;
4620 memset(&info, 0, sizeof(info));
4621 else if (copy_from_user(&info, argp, sizeof(info))) {
4626 err = update_array_info(mddev, &info);
4628 printk(KERN_WARNING "md: couldn't update"
4629 " array info. %d\n", err);
4634 if (!list_empty(&mddev->disks)) {
4636 "md: array %s already has disks!\n",
4641 if (mddev->raid_disks) {
4643 "md: array %s already initialised!\n",
4648 err = set_array_info(mddev, &info);
4650 printk(KERN_WARNING "md: couldn't set"
4651 " array info. %d\n", err);
4661 * Commands querying/configuring an existing array:
4663 /* if we are not initialised yet, only ADD_NEW_DISK, STOP_ARRAY,
4664 * RUN_ARRAY, and GET_ and SET_BITMAP_FILE are allowed */
4665 if ((!mddev->raid_disks && !mddev->external)
4666 && cmd != ADD_NEW_DISK && cmd != STOP_ARRAY
4667 && cmd != RUN_ARRAY && cmd != SET_BITMAP_FILE
4668 && cmd != GET_BITMAP_FILE) {
4674 * Commands even a read-only array can execute:
4678 case GET_ARRAY_INFO:
4679 err = get_array_info(mddev, argp);
4682 case GET_BITMAP_FILE:
4683 err = get_bitmap_file(mddev, argp);
4687 err = get_disk_info(mddev, argp);
4690 case RESTART_ARRAY_RW:
4691 err = restart_array(mddev);
4695 err = do_md_stop (mddev, 0);
4699 err = do_md_stop (mddev, 1);
4703 * We have a problem here: there is no easy way to give a CHS
4704 * virtual geometry. We currently pretend that we have 2 heads,
4705 * 4 sectors (with a BIG number of cylinders...). This drives
4706 * dosfs just mad... ;-)
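/*
 * Worked example of that fake geometry: with 2 heads and 4 sectors per
 * track, one "cylinder" is 8 sectors (4KiB), so a 1TiB array
 * (2147483648 sectors) reports 268435456 cylinders -- exactly the
 * get_capacity()/8 computed in md_getgeo() above.
 */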
4711 * The remaining ioctls are changing the state of the
4712 * superblock, so we do not allow them on read-only arrays.
4713 * However non-MD ioctls (e.g. get-size) will still come through
4714 * here and hit the 'default' below, so only disallow
4715 * 'md' ioctls, and switch to rw mode if started auto-readonly.
4717 if (_IOC_TYPE(cmd) == MD_MAJOR &&
4718 mddev->ro && mddev->pers) {
4719 if (mddev->ro == 2) {
4721 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
4722 md_wakeup_thread(mddev->thread);
4734 mdu_disk_info_t info;
4735 if (copy_from_user(&info, argp, sizeof(info)))
4738 err = add_new_disk(mddev, &info);
4742 case HOT_REMOVE_DISK:
4743 err = hot_remove_disk(mddev, new_decode_dev(arg));
4747 err = hot_add_disk(mddev, new_decode_dev(arg));
4750 case SET_DISK_FAULTY:
4751 err = set_disk_faulty(mddev, new_decode_dev(arg));
4755 err = do_md_run (mddev);
4758 case SET_BITMAP_FILE:
4759 err = set_bitmap_file(mddev, (int)arg);
4769 mddev_unlock(mddev);
4779 static int md_open(struct inode *inode, struct file *file)
4782 * Succeed if we can lock the mddev, which confirms that
4783 * it isn't being stopped right now.
4785 mddev_t *mddev = inode->i_bdev->bd_disk->private_data;
4788 if ((err = mutex_lock_interruptible_nested(&mddev->reconfig_mutex, 1)))
4793 mddev_unlock(mddev);
4795 check_disk_change(inode->i_bdev);
4800 static int md_release(struct inode *inode, struct file * file)
4802 mddev_t *mddev = inode->i_bdev->bd_disk->private_data;
4810 static int md_media_changed(struct gendisk *disk)
4812 mddev_t *mddev = disk->private_data;
4814 return mddev->changed;
4817 static int md_revalidate(struct gendisk *disk)
4819 mddev_t *mddev = disk->private_data;
4824 static struct block_device_operations md_fops =
4826 .owner = THIS_MODULE,
4828 .release = md_release,
4830 .getgeo = md_getgeo,
4831 .media_changed = md_media_changed,
4832 .revalidate_disk= md_revalidate,
4835 static int md_thread(void * arg)
4837 mdk_thread_t *thread = arg;
4840 * md_thread is a 'system-thread', its priority should be very
4841 * high. We avoid resource deadlocks individually in each
4842 * raid personality. (RAID5 does preallocation) We also use RR and
4843 * the very same RT priority as kswapd, thus we will never get
4844 * into a priority inversion deadlock.
4846 * we definitely have to have equal or higher priority than
4847 * bdflush, otherwise bdflush will deadlock if there are too
4848 * many dirty RAID5 blocks.
4851 allow_signal(SIGKILL);
4852 while (!kthread_should_stop()) {
4854 /* We need to wait INTERRUPTIBLE so that
4855 * we don't add to the load-average.
4856 * That means we need to be sure no signals are
4857 * pending.
4858 */
4859 if (signal_pending(current))
4860 flush_signals(current);
4862 wait_event_interruptible_timeout
4864 test_bit(THREAD_WAKEUP, &thread->flags)
4865 || kthread_should_stop(),
4868 clear_bit(THREAD_WAKEUP, &thread->flags);
4870 thread->run(thread->mddev);
4876 void md_wakeup_thread(mdk_thread_t *thread)
4879 dprintk("md: waking up MD thread %s.\n", thread->tsk->comm);
4880 set_bit(THREAD_WAKEUP, &thread->flags);
4881 wake_up(&thread->wqueue);
4885 mdk_thread_t *md_register_thread(void (*run) (mddev_t *), mddev_t *mddev,
4888 mdk_thread_t *thread;
4890 thread = kzalloc(sizeof(mdk_thread_t), GFP_KERNEL);
4894 init_waitqueue_head(&thread->wqueue);
4897 thread->mddev = mddev;
4898 thread->timeout = MAX_SCHEDULE_TIMEOUT;
4899 thread->tsk = kthread_run(md_thread, thread, name, mdname(thread->mddev));
4900 if (IS_ERR(thread->tsk)) {
4907 void md_unregister_thread(mdk_thread_t *thread)
4909 dprintk("interrupting MD-thread pid %d\n", task_pid_nr(thread->tsk));
4911 kthread_stop(thread->tsk);
4915 void md_error(mddev_t *mddev, mdk_rdev_t *rdev)
4922 if (!rdev || test_bit(Faulty, &rdev->flags))
4925 dprintk("md_error dev:%s, rdev:(%d:%d), (caller: %p,%p,%p,%p).\n",
4927 MAJOR(rdev->bdev->bd_dev), MINOR(rdev->bdev->bd_dev),
4928 __builtin_return_address(0),__builtin_return_address(1),
4929 __builtin_return_address(2),__builtin_return_address(3));
4933 if (!mddev->pers->error_handler)
4935 mddev->pers->error_handler(mddev,rdev);
4936 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
4937 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
4938 md_wakeup_thread(mddev->thread);
4939 md_new_event_inintr(mddev);
4942 /* seq_file implementation of /proc/mdstat */
4944 static void status_unused(struct seq_file *seq)
4948 struct list_head *tmp;
4950 seq_printf(seq, "unused devices: ");
4952 rdev_for_each_list(rdev, tmp, pending_raid_disks) {
4953 char b[BDEVNAME_SIZE];
4955 seq_printf(seq, "%s ",
4956 bdevname(rdev->bdev,b));
4959 seq_printf(seq, "<none>");
4961 seq_printf(seq, "\n");
4965 static void status_resync(struct seq_file *seq, mddev_t * mddev)
4967 sector_t max_blocks, resync, res;
4968 unsigned long dt, db, rt;
4970 unsigned int per_milli;
4972 resync = (mddev->curr_resync - atomic_read(&mddev->recovery_active))/2;
4974 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
4975 max_blocks = mddev->resync_max_sectors >> 1;
4977 max_blocks = mddev->size;
4980 * Should not happen.
4986 /* Pick 'scale' such that (resync>>scale)*1000 will fit
4987 * in a sector_t, and (max_blocks>>scale) will fit in a
4988 * u32, as those are the requirements for sector_div.
4989 * Thus 'scale' must be at least 10
4992 if (sizeof(sector_t) > sizeof(unsigned long)) {
4993 while ( max_blocks/2 > (1ULL<<(scale+32)))
4996 res = (resync>>scale)*1000;
4997 sector_div(res, (u32)((max_blocks>>scale)+1));
5001 int i, x = per_milli/50, y = 20-x;
5002 seq_printf(seq, "[");
5003 for (i = 0; i < x; i++)
5004 seq_printf(seq, "=");
5005 seq_printf(seq, ">");
5006 for (i = 0; i < y; i++)
5007 seq_printf(seq, ".");
5008 seq_printf(seq, "] ");
5010 seq_printf(seq, " %s =%3u.%u%% (%llu/%llu)",
5011 (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)?
5013 (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)?
5015 (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ?
5016 "resync" : "recovery"))),
5017 per_milli/10, per_milli % 10,
5018 (unsigned long long) resync,
5019 (unsigned long long) max_blocks);
5022 * We do not want to overflow, so the order of operands and
5023 * the * 100 / 100 trick are important. We do a +1 to be
5024 * safe against division by zero. We only estimate anyway.
5026 * dt: time from mark until now
5027 * db: blocks written from mark until now
5028 * rt: remaining time
5030 dt = ((jiffies - mddev->resync_mark) / HZ);
5032 db = (mddev->curr_mark_cnt - atomic_read(&mddev->recovery_active))
5033 - mddev->resync_mark_cnt;
5034 rt = (dt * ((unsigned long)(max_blocks-resync) / (db/2/100+1)))/100;
5036 seq_printf(seq, " finish=%lu.%lumin", rt / 60, (rt % 60)/6);
5038 seq_printf(seq, " speed=%ldK/sec", db/2/dt);
5041 static void *md_seq_start(struct seq_file *seq, loff_t *pos)
5043 struct list_head *tmp;
5053 spin_lock(&all_mddevs_lock);
5054 list_for_each(tmp,&all_mddevs)
5056 mddev = list_entry(tmp, mddev_t, all_mddevs);
5058 spin_unlock(&all_mddevs_lock);
5061 spin_unlock(&all_mddevs_lock);
5063 return (void*)2;/* tail */
5067 static void *md_seq_next(struct seq_file *seq, void *v, loff_t *pos)
5069 struct list_head *tmp;
5070 mddev_t *next_mddev, *mddev = v;
5076 spin_lock(&all_mddevs_lock);
5078 tmp = all_mddevs.next;
5080 tmp = mddev->all_mddevs.next;
5081 if (tmp != &all_mddevs)
5082 next_mddev = mddev_get(list_entry(tmp,mddev_t,all_mddevs));
5084 next_mddev = (void*)2;
5087 spin_unlock(&all_mddevs_lock);
5095 static void md_seq_stop(struct seq_file *seq, void *v)
5099 if (mddev && v != (void*)1 && v != (void*)2)
5103 struct mdstat_info {
static int md_seq_show(struct seq_file *seq, void *v)
{
	mddev_t *mddev = v;
	sector_t size;
	struct list_head *tmp2;
	mdk_rdev_t *rdev;
	struct mdstat_info *mi = seq->private;
	struct bitmap *bitmap;

	if (v == (void*)1) {
		struct mdk_personality *pers;
		seq_printf(seq, "Personalities : ");
		spin_lock(&pers_lock);
		list_for_each_entry(pers, &pers_list, list)
			seq_printf(seq, "[%s] ", pers->name);

		spin_unlock(&pers_lock);
		seq_printf(seq, "\n");
		mi->event = atomic_read(&md_event_count);
		return 0;
	}
	if (v == (void*)2) {
		status_unused(seq);
		return 0;
	}

	if (mddev_lock(mddev) < 0)
		return -EINTR;

	if (mddev->pers || mddev->raid_disks || !list_empty(&mddev->disks)) {
		seq_printf(seq, "%s : %sactive", mdname(mddev),
			   mddev->pers ? "" : "in");
		if (mddev->pers) {
			if (mddev->ro==1)
				seq_printf(seq, " (read-only)");
			if (mddev->ro==2)
				seq_printf(seq, " (auto-read-only)");
			seq_printf(seq, " %s", mddev->pers->name);
		}

		size = 0;
		rdev_for_each(rdev, tmp2, mddev) {
			char b[BDEVNAME_SIZE];
			seq_printf(seq, " %s[%d]",
				   bdevname(rdev->bdev,b), rdev->desc_nr);
			if (test_bit(WriteMostly, &rdev->flags))
				seq_printf(seq, "(W)");
			if (test_bit(Faulty, &rdev->flags)) {
				seq_printf(seq, "(F)");
				continue;
			} else if (rdev->raid_disk < 0)
				seq_printf(seq, "(S)"); /* spare */
			size += rdev->size;
		}

		if (!list_empty(&mddev->disks)) {
			if (mddev->pers)
				seq_printf(seq, "\n      %llu blocks",
					   (unsigned long long)mddev->array_size);
			else
				seq_printf(seq, "\n      %llu blocks",
					   (unsigned long long)size);
		}
		if (mddev->persistent) {
			if (mddev->major_version != 0 ||
			    mddev->minor_version != 90) {
				seq_printf(seq," super %d.%d",
					   mddev->major_version,
					   mddev->minor_version);
			}
		} else if (mddev->external)
			seq_printf(seq, " super external:%s",
				   mddev->metadata_type);
		else
			seq_printf(seq, " super non-persistent");

		if (mddev->pers) {
			mddev->pers->status (seq, mddev);
			seq_printf(seq, "\n      ");
			if (mddev->pers->sync_request) {
				if (mddev->curr_resync > 2) {
					status_resync (seq, mddev);
					seq_printf(seq, "\n      ");
				} else if (mddev->curr_resync == 1 || mddev->curr_resync == 2)
					seq_printf(seq, "\tresync=DELAYED\n      ");
				else if (mddev->recovery_cp < MaxSector)
					seq_printf(seq, "\tresync=PENDING\n      ");
			}
		} else
			seq_printf(seq, "\n       ");

		if ((bitmap = mddev->bitmap)) {
			unsigned long chunk_kb;
			unsigned long flags;
			spin_lock_irqsave(&bitmap->lock, flags);
			chunk_kb = bitmap->chunksize >> 10;
			seq_printf(seq, "bitmap: %lu/%lu pages [%luKB], "
				   "%lu%s chunk",
				   bitmap->pages - bitmap->missing_pages,
				   bitmap->pages,
				   (bitmap->pages - bitmap->missing_pages)
						<< (PAGE_SHIFT - 10),
				   chunk_kb ? chunk_kb : bitmap->chunksize,
				   chunk_kb ? "KB" : "B");
			if (bitmap->file) {
				seq_printf(seq, ", file: ");
				seq_path(seq, &bitmap->file->f_path, " \t\n");
			}

			seq_printf(seq, "\n");
			spin_unlock_irqrestore(&bitmap->lock, flags);
		}

		seq_printf(seq, "\n");

	}
	mddev_unlock(mddev);

	return 0;
}
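/*
 * Illustrative only (added): the kind of /proc/mdstat content the
 * seq_printf calls above produce for a two-disk raid1 mid-resync.  The
 * "[2/2] [UU]" and progress-bar fields come from the personality's
 * ->status hook and status_resync(), so exact output varies:
 *
 *   Personalities : [raid1]
 *   md0 : active raid1 sdb1[1] sda1[0]
 *         1048512 blocks [2/2] [UU]
 *         [==>..................]  resync = 12.1% (127232/1048512)
 *                                  finish=8.1min speed=2048K/sec
 *
 *   unused devices: <none>
 */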
static struct seq_operations md_seq_ops = {
	.start  = md_seq_start,
	.next   = md_seq_next,
	.stop   = md_seq_stop,
	.show   = md_seq_show,
};
static int md_seq_open(struct inode *inode, struct file *file)
{
	int error;
	struct mdstat_info *mi = kmalloc(sizeof(*mi), GFP_KERNEL);
	if (mi == NULL)
		return -ENOMEM;

	error = seq_open(file, &md_seq_ops);
	if (error)
		kfree(mi);
	else {
		struct seq_file *p = file->private_data;
		p->private = mi;
		mi->event = atomic_read(&md_event_count);
	}
	return error;
}
static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
{
	struct seq_file *m = filp->private_data;
	struct mdstat_info *mi = m->private;
	int mask;

	poll_wait(filp, &md_event_waiters, wait);

	/* always allow read */
	mask = POLLIN | POLLRDNORM;

	if (mi->event != atomic_read(&md_event_count))
		mask |= POLLERR | POLLPRI;
	return mask;
}
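/*
 * Illustrative only (added): a minimal userspace consumer of the poll
 * support above, in the style of mdadm --monitor.  md_new_event() bumps
 * md_event_count, which makes an open /proc/mdstat report POLLPRI.
 * Wrapped in #if 0 since it is not kernel code.
 */
#if 0
#include <fcntl.h>
#include <poll.h>
#include <unistd.h>

static int wait_for_md_event(void)
{
	char buf[4096];
	struct pollfd pfd;

	pfd.fd = open("/proc/mdstat", O_RDONLY);
	if (pfd.fd < 0)
		return -1;
	read(pfd.fd, buf, sizeof(buf));	/* consume the current state */
	pfd.events = POLLPRI;
	poll(&pfd, 1, -1);		/* returns when md_event_count changes */
	close(pfd.fd);
	return 0;
}
#endif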
static const struct file_operations md_seq_fops = {
	.owner		= THIS_MODULE,
	.open           = md_seq_open,
	.read           = seq_read,
	.llseek         = seq_lseek,
	.release	= seq_release_private,
	.poll		= mdstat_poll,
};
int register_md_personality(struct mdk_personality *p)
{
	spin_lock(&pers_lock);
	list_add_tail(&p->list, &pers_list);
	printk(KERN_INFO "md: %s personality registered for level %d\n", p->name, p->level);
	spin_unlock(&pers_lock);
	return 0;
}
int unregister_md_personality(struct mdk_personality *p)
{
	printk(KERN_INFO "md: %s personality unregistered\n", p->name);
	spin_lock(&pers_lock);
	list_del_init(&p->list);
	spin_unlock(&pers_lock);
	return 0;
}
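/*
 * Illustrative only (added): how a personality module uses the pair of
 * calls above, in the style of drivers/md/raid0.c.  The raid0_* hooks
 * named here live in that file, not in md.c, so the block is disabled.
 */
#if 0
static struct mdk_personality raid0_personality = {
	.name		= "raid0",
	.level		= 0,
	.owner		= THIS_MODULE,
	.make_request	= raid0_make_request,
	.run		= raid0_run,
	.stop		= raid0_stop,
	.status		= raid0_status,
};

static int __init raid0_init(void)
{
	return register_md_personality(&raid0_personality);
}

static void raid0_exit(void)
{
	unregister_md_personality(&raid0_personality);
}
#endif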
static int is_mddev_idle(mddev_t *mddev)
{
	mdk_rdev_t * rdev;
	struct list_head *tmp;
	int idle;
	long curr_events;

	idle = 1;
	rdev_for_each(rdev, tmp, mddev) {
		struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
		curr_events = disk_stat_read(disk, sectors[0]) +
				disk_stat_read(disk, sectors[1]) -
				atomic_read(&disk->sync_io);
		/* sync IO will cause sync_io to increase before the disk_stats
		 * as sync_io is counted when a request starts, and
		 * disk_stats is counted when it completes.
		 * So resync activity will cause curr_events to be smaller than
		 * when there was no such activity.
		 * non-sync IO will cause disk_stat to increase without
		 * increasing sync_io so curr_events will (eventually)
		 * be larger than it was before.  Once it becomes
		 * substantially larger, the test below will cause
		 * the array to appear non-idle, and resync will slow
		 * down.
		 * If there is a lot of outstanding resync activity when
		 * we set last_event to curr_events, then all that activity
		 * completing might cause the array to appear non-idle
		 * and resync will be slowed down even though there might
		 * not have been non-resync activity.  This will only
		 * happen once though.  'last_events' will soon reflect
		 * the state where there is little or no outstanding
		 * resync requests, and further resync activity will
		 * always make curr_events less than last_events.
		 */
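		/* (Added note, illustrative: the 4096-sector margin below is
		 * 2MB per device, so a device must see more than 2MB of new
		 * non-resync IO between two calls before it drags the whole
		 * array out of the idle state.)
		 */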
		if (curr_events - rdev->last_events > 4096) {
			rdev->last_events = curr_events;
			idle = 0;
		}
	}
	return idle;
}
void md_done_sync(mddev_t *mddev, int blocks, int ok)
{
	/* another "blocks" (512byte) blocks have been synced */
	atomic_sub(blocks, &mddev->recovery_active);
	wake_up(&mddev->recovery_wait);
	if (!ok) {
		set_bit(MD_RECOVERY_ERR, &mddev->recovery);
		md_wakeup_thread(mddev->thread);
		// stop recovery, signal do_sync ....
	}
}
/* md_write_start(mddev, bi)
 * If we need to update some array metadata (e.g. 'active' flag
 * in superblock) before writing, schedule a superblock update
 * and wait for it to complete.
 */
void md_write_start(mddev_t *mddev, struct bio *bi)
{
	if (bio_data_dir(bi) != WRITE)
		return;

	BUG_ON(mddev->ro == 1);
	if (mddev->ro == 2) {
		/* need to switch to read/write */
		mddev->ro = 0;
		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
		md_wakeup_thread(mddev->thread);
		md_wakeup_thread(mddev->sync_thread);
	}
	atomic_inc(&mddev->writes_pending);
	if (mddev->in_sync) {
		spin_lock_irq(&mddev->write_lock);
		if (mddev->in_sync) {
			mddev->in_sync = 0;
			set_bit(MD_CHANGE_CLEAN, &mddev->flags);
			md_wakeup_thread(mddev->thread);
		}
		spin_unlock_irq(&mddev->write_lock);
	}
	wait_event(mddev->sb_wait, mddev->flags==0);
}
void md_write_end(mddev_t *mddev)
{
	if (atomic_dec_and_test(&mddev->writes_pending)) {
		if (mddev->safemode == 2)
			md_wakeup_thread(mddev->thread);
		else if (mddev->safemode_delay)
			mod_timer(&mddev->safemode_timer, jiffies + mddev->safemode_delay);
	}
}
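/*
 * Illustrative only (added): personalities bracket array writes with the
 * two helpers above, roughly as raid1 does.  The function and variable
 * names in this sketch are placeholders, not md.c symbols.
 */
#if 0
static int example_make_request(struct request_queue *q, struct bio *bio)
{
	mddev_t *mddev = q->queuedata;

	/* may block until the superblock is marked 'active' */
	md_write_start(mddev, bio);
	/* ... clone and submit the bio to the member devices ... */
	return 0;
}

static void example_write_done(mddev_t *mddev)
{
	/* called once the last member write completes */
	md_write_end(mddev);
}
#endif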
/* md_allow_write(mddev)
 * Calling this ensures that the array is marked 'active' so that writes
 * may proceed without blocking.  It is important to call this before
 * attempting a GFP_KERNEL allocation while holding the mddev lock.
 * Must be called with mddev_lock held.
 */
void md_allow_write(mddev_t *mddev)
{
	if (!mddev->pers)
		return;
	if (mddev->ro)
		return;

	spin_lock_irq(&mddev->write_lock);
	if (mddev->in_sync) {
		mddev->in_sync = 0;
		set_bit(MD_CHANGE_CLEAN, &mddev->flags);
		if (mddev->safemode_delay &&
		    mddev->safemode == 0)
			mddev->safemode = 1;
		spin_unlock_irq(&mddev->write_lock);
		md_update_sb(mddev, 0);
	} else
		spin_unlock_irq(&mddev->write_lock);
}
EXPORT_SYMBOL_GPL(md_allow_write);
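/*
 * Illustrative only (added): the intended calling pattern, similar to
 * raid5's stripe-cache resize.  Marking the array active first avoids
 * deadlocking on a superblock write-out inside a GFP_KERNEL allocation
 * made while mddev_lock is held.  Names here are placeholders.
 */
#if 0
	if (mddev_lock(mddev))
		return -EINTR;
	md_allow_write(mddev);
	new = kmalloc(size, GFP_KERNEL);	/* safe: array marked active */
	/* ... install or use the new buffer ... */
	mddev_unlock(mddev);
#endif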
static DECLARE_WAIT_QUEUE_HEAD(resync_wait);

#define SYNC_MARKS	10
#define	SYNC_MARK_STEP	(3*HZ)
void md_do_sync(mddev_t *mddev)
{
	mddev_t *mddev2;
	unsigned int currspeed = 0,
		 window;
	sector_t max_sectors,j, io_sectors;
	unsigned long mark[SYNC_MARKS];
	sector_t mark_cnt[SYNC_MARKS];
	int last_mark,m;
	struct list_head *tmp;
	sector_t last_check;
	int skipped = 0;
	struct list_head *rtmp;
	mdk_rdev_t *rdev;
	char *desc;

	/* just incase thread restarts... */
	if (test_bit(MD_RECOVERY_DONE, &mddev->recovery))
		return;
	if (mddev->ro) /* never try to sync a read-only array */
		return;

	if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
		if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery))
			desc = "data-check";
		else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
			desc = "requested-resync";
		else
			desc = "resync";
	} else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
		desc = "reshape";
	else
		desc = "recovery";

	/* we overload curr_resync somewhat here.
	 * 0 == not engaged in resync at all
	 * 2 == checking that there is no conflict with another sync
	 * 1 == like 2, but have yielded to allow conflicting resync to
	 *		commence
	 * other == active in resync - this many blocks
	 *
	 * Before starting a resync we must have set curr_resync to
	 * 2, and then checked that every "conflicting" array has curr_resync
	 * less than ours.  When we find one that is the same or higher
	 * we wait on resync_wait.  To avoid deadlock, we reduce curr_resync
	 * to 1 if we choose to yield (based arbitrarily on address of mddev structure).
	 * This will mean we have to start checking from the beginning again.
	 */

	do {
		mddev->curr_resync = 2;

	try_again:
		if (kthread_should_stop()) {
			set_bit(MD_RECOVERY_INTR, &mddev->recovery);
			goto skip;
		}
		for_each_mddev(mddev2, tmp) {
			if (mddev2 == mddev)
				continue;
			if (mddev2->curr_resync &&
			    match_mddev_units(mddev,mddev2)) {
				DEFINE_WAIT(wq);
				if (mddev < mddev2 && mddev->curr_resync == 2) {
					/* arbitrarily yield */
					mddev->curr_resync = 1;
					wake_up(&resync_wait);
				}
				if (mddev > mddev2 && mddev->curr_resync == 1)
					/* no need to wait here, we can wait the next
					 * time 'round when curr_resync == 2
					 */
					continue;
				prepare_to_wait(&resync_wait, &wq, TASK_UNINTERRUPTIBLE);
				if (!kthread_should_stop() &&
				    mddev2->curr_resync >= mddev->curr_resync) {
					printk(KERN_INFO "md: delaying %s of %s"
					       " until %s has finished (they"
					       " share one or more physical units)\n",
					       desc, mdname(mddev), mdname(mddev2));
					mddev_put(mddev2);
					schedule();
					finish_wait(&resync_wait, &wq);
					goto try_again;
				}
				finish_wait(&resync_wait, &wq);
			}
		}
	} while (mddev->curr_resync < 2);

	j = 0;
	if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
		/* resync follows the size requested by the personality,
		 * which defaults to physical size, but can be virtual size
		 */
		max_sectors = mddev->resync_max_sectors;
		mddev->resync_mismatches = 0;
		/* we don't use the checkpoint if there's a bitmap */
		if (!mddev->bitmap &&
		    !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
			j = mddev->recovery_cp;
	} else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
		max_sectors = mddev->size << 1;
	else {
		/* recovery follows the physical size of devices */
		max_sectors = mddev->size << 1;
		j = MaxSector;
		rdev_for_each(rdev, rtmp, mddev)
			if (rdev->raid_disk >= 0 &&
			    !test_bit(Faulty, &rdev->flags) &&
			    !test_bit(In_sync, &rdev->flags) &&
			    rdev->recovery_offset < j)
				j = rdev->recovery_offset;
	}

	printk(KERN_INFO "md: %s of RAID array %s\n", desc, mdname(mddev));
	printk(KERN_INFO "md: minimum _guaranteed_ speed:"
	       " %d KB/sec/disk.\n", speed_min(mddev));
	printk(KERN_INFO "md: using maximum available idle IO bandwidth "
	       "(but not more than %d KB/sec) for %s.\n",
	       speed_max(mddev), desc);

	is_mddev_idle(mddev); /* this also initializes IO event counters */

	io_sectors = 0;
	for (m = 0; m < SYNC_MARKS; m++) {
		mark[m] = jiffies;
		mark_cnt[m] = io_sectors;
	}
	last_mark = 0;
	mddev->resync_mark = mark[last_mark];
	mddev->resync_mark_cnt = mark_cnt[last_mark];

	/*
	 * Tune reconstruction:
	 */
	window = 32*(PAGE_SIZE/512);
	printk(KERN_INFO "md: using %dk window, over a total of %llu blocks.\n",
	       window/2,(unsigned long long) max_sectors/2);

	atomic_set(&mddev->recovery_active, 0);
	init_waitqueue_head(&mddev->recovery_wait);
	last_check = 0;

	if (j>2) {
		printk(KERN_INFO
		       "md: resuming %s of %s from checkpoint.\n",
		       desc, mdname(mddev));
		mddev->curr_resync = j;
	}

	while (j < max_sectors) {
		sector_t sectors;

		skipped = 0;
		if (j >= mddev->resync_max) {
			sysfs_notify(&mddev->kobj, NULL, "sync_completed");
			wait_event(mddev->recovery_wait,
				   mddev->resync_max > j
				   || kthread_should_stop());
		}
		if (kthread_should_stop())
			goto interrupted;
		sectors = mddev->pers->sync_request(mddev, j, &skipped,
						    currspeed < speed_min(mddev));
		if (sectors == 0) {
			set_bit(MD_RECOVERY_ERR, &mddev->recovery);
			goto out;
		}

		if (!skipped) { /* actual IO requested */
			io_sectors += sectors;
			atomic_add(sectors, &mddev->recovery_active);
		}

		j += sectors;
		if (j>1) mddev->curr_resync = j;
		mddev->curr_mark_cnt = io_sectors;
		if (last_check == 0)
			/* this is the earliest that the rebuild will be
			 * visible in /proc/mdstat
			 */
			md_new_event(mddev);

		if (last_check + window > io_sectors || j == max_sectors)
			continue;

		last_check = io_sectors;

		if (test_bit(MD_RECOVERY_INTR, &mddev->recovery) ||
		    test_bit(MD_RECOVERY_ERR, &mddev->recovery))
			break;

	repeat:
		if (time_after_eq(jiffies, mark[last_mark] + SYNC_MARK_STEP )) {
			/* step marks */
			int next = (last_mark+1) % SYNC_MARKS;

			mddev->resync_mark = mark[next];
			mddev->resync_mark_cnt = mark_cnt[next];
			mark[next] = jiffies;
			mark_cnt[next] = io_sectors - atomic_read(&mddev->recovery_active);
			last_mark = next;
		}

		if (kthread_should_stop())
			goto interrupted;

		/*
		 * this loop exits only if we are slower than the 'hard'
		 * speed limit, or if the system was IO-idle for a jiffy.
		 * the system might be non-idle CPU-wise, but we only care
		 * about not overloading the IO subsystem. (things like an
		 * e2fsck being done on the RAID array should execute fast)
		 */
		blk_unplug(mddev->queue);
		cond_resched();

		currspeed = ((unsigned long)(io_sectors-mddev->resync_mark_cnt))/2
			/((jiffies-mddev->resync_mark)/HZ +1) +1;
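		/* (Added worked example, illustrative: 61440 sectors of resync
		 * IO since the mark after 29 seconds gives
		 * 61440/2 / (29+1) + 1 = 1025K/sec, which is then compared
		 * against speed_min()/speed_max() below.)
		 */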
		if (currspeed > speed_min(mddev)) {
			if ((currspeed > speed_max(mddev)) ||
			    !is_mddev_idle(mddev)) {
				msleep(500);
				goto repeat;
			}
		}
	}
	printk(KERN_INFO "md: %s: %s done.\n",mdname(mddev), desc);
	/*
	 * this also signals 'finished resyncing' to md_stop
	 */
 out:
	blk_unplug(mddev->queue);

	wait_event(mddev->recovery_wait, !atomic_read(&mddev->recovery_active));

	/* tell personality that we are finished */
	mddev->pers->sync_request(mddev, max_sectors, &skipped, 1);

	if (!test_bit(MD_RECOVERY_ERR, &mddev->recovery) &&
	    !test_bit(MD_RECOVERY_CHECK, &mddev->recovery) &&
	    mddev->curr_resync > 2) {
		if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
			if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
				if (mddev->curr_resync >= mddev->recovery_cp) {
					printk(KERN_INFO
					       "md: checkpointing %s of %s.\n",
					       desc, mdname(mddev));
					mddev->recovery_cp = mddev->curr_resync;
				}
			} else
				mddev->recovery_cp = MaxSector;
		} else {
			if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery))
				mddev->curr_resync = MaxSector;
			rdev_for_each(rdev, rtmp, mddev)
				if (rdev->raid_disk >= 0 &&
				    !test_bit(Faulty, &rdev->flags) &&
				    !test_bit(In_sync, &rdev->flags) &&
				    rdev->recovery_offset < mddev->curr_resync)
					rdev->recovery_offset = mddev->curr_resync;
		}
	}
	set_bit(MD_CHANGE_DEVS, &mddev->flags);

 skip:
	mddev->curr_resync = 0;
	mddev->resync_max = MaxSector;
	sysfs_notify(&mddev->kobj, NULL, "sync_completed");
	wake_up(&resync_wait);
	set_bit(MD_RECOVERY_DONE, &mddev->recovery);
	md_wakeup_thread(mddev->thread);
	return;

 interrupted:
	/*
	 * got a signal, exit.
	 */
	printk(KERN_INFO
	       "md: md_do_sync() got signal ... exiting\n");
	set_bit(MD_RECOVERY_INTR, &mddev->recovery);
	goto out;

}
EXPORT_SYMBOL_GPL(md_do_sync);
static int remove_and_add_spares(mddev_t *mddev)
{
	mdk_rdev_t *rdev;
	struct list_head *rtmp;
	int spares = 0;

	rdev_for_each(rdev, rtmp, mddev)
		if (rdev->raid_disk >= 0 &&
		    !mddev->external &&
		    (test_bit(Faulty, &rdev->flags) ||
		     ! test_bit(In_sync, &rdev->flags)) &&
		    atomic_read(&rdev->nr_pending)==0) {
			if (mddev->pers->hot_remove_disk(
				    mddev, rdev->raid_disk)==0) {
				char nm[20];
				sprintf(nm,"rd%d", rdev->raid_disk);
				sysfs_remove_link(&mddev->kobj, nm);
				rdev->raid_disk = -1;
			}
		}

	if (mddev->degraded) {
		rdev_for_each(rdev, rtmp, mddev)
			if (rdev->raid_disk < 0
			    && !test_bit(Faulty, &rdev->flags)) {
				rdev->recovery_offset = 0;
				if (mddev->pers->hot_add_disk(mddev,rdev)) {
					char nm[20];
					sprintf(nm, "rd%d", rdev->raid_disk);
					if (sysfs_create_link(&mddev->kobj,
							      &rdev->kobj, nm))
						printk(KERN_WARNING
						       "md: cannot register "
						       "%s for %s\n",
						       nm, mdname(mddev));
					spares++;
					md_new_event(mddev);
				} else
					break;
			}
	}
	return spares;
}
/*
 * This routine is regularly called by all per-raid-array threads to
 * deal with generic issues like resync and super-block update.
 * Raid personalities that don't have a thread (linear/raid0) do not
 * need this as they never do any recovery or update the superblock.
 *
 * It does not do any resync itself, but rather "forks" off other threads
 * to do that as needed.
 * When it is determined that resync is needed, we set MD_RECOVERY_RUNNING in
 * "->recovery" and create a thread at ->sync_thread.
 * When the thread finishes it sets MD_RECOVERY_DONE (and might set MD_RECOVERY_ERR)
 * and wakes up this thread which will reap the thread and finish up.
 * This thread also removes any faulty devices (with nr_pending == 0).
 *
 * The overall approach is:
 *  1/ if the superblock needs updating, update it.
 *  2/ If a recovery thread is running, don't do anything else.
 *  3/ If recovery has finished, clean up, possibly marking spares active.
 *  4/ If there are any faulty devices, remove them.
 *  5/ If array is degraded, try to add spare devices.
 *  6/ If array has spares or is not in-sync, start a resync thread.
 */
void md_check_recovery(mddev_t *mddev)
{
	mdk_rdev_t *rdev;
	struct list_head *rtmp;


	if (mddev->bitmap)
		bitmap_daemon_work(mddev->bitmap);

	if (mddev->ro)
		return;

	if (signal_pending(current)) {
		if (mddev->pers->sync_request) {
			printk(KERN_INFO "md: %s in immediate safe mode\n",
			       mdname(mddev));
			mddev->safemode = 2;
		}
		flush_signals(current);
	}

	if ( ! (
		(mddev->flags && !mddev->external) ||
		test_bit(MD_RECOVERY_NEEDED, &mddev->recovery) ||
		test_bit(MD_RECOVERY_DONE, &mddev->recovery) ||
		(mddev->safemode == 1) ||
		(mddev->safemode == 2 && ! atomic_read(&mddev->writes_pending)
		 && !mddev->in_sync && mddev->recovery_cp == MaxSector)
		))
		return;

	if (mddev_trylock(mddev)) {
		int spares = 0;

		spin_lock_irq(&mddev->write_lock);
		if (mddev->safemode && !atomic_read(&mddev->writes_pending) &&
		    !mddev->in_sync && mddev->recovery_cp == MaxSector) {
			mddev->in_sync = 1;
			if (mddev->persistent)
				set_bit(MD_CHANGE_CLEAN, &mddev->flags);
		}
		if (mddev->safemode == 1)
			mddev->safemode = 0;
		spin_unlock_irq(&mddev->write_lock);

		if (mddev->flags)
			md_update_sb(mddev, 0);

		if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) &&
		    !test_bit(MD_RECOVERY_DONE, &mddev->recovery)) {
			/* resync/recovery still happening */
			clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
			goto unlock;
		}
		if (mddev->sync_thread) {
			/* resync has finished, collect result */
			md_unregister_thread(mddev->sync_thread);
			mddev->sync_thread = NULL;
			if (!test_bit(MD_RECOVERY_ERR, &mddev->recovery) &&
			    !test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
				/* success...*/
				/* activate any spares */
				mddev->pers->spare_active(mddev);
			}
			md_update_sb(mddev, 1);

			/* if array is no-longer degraded, then any saved_raid_disk
			 * information must be scrapped
			 */
			if (!mddev->degraded)
				rdev_for_each(rdev, rtmp, mddev)
					rdev->saved_raid_disk = -1;

			mddev->recovery = 0;
			/* flag recovery needed just to double check */
			set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
			md_new_event(mddev);
			goto unlock;
		}
		/* Clear some bits that don't mean anything, but
		 * might be left set
		 */
		clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
		clear_bit(MD_RECOVERY_ERR, &mddev->recovery);
		clear_bit(MD_RECOVERY_INTR, &mddev->recovery);
		clear_bit(MD_RECOVERY_DONE, &mddev->recovery);

		if (test_bit(MD_RECOVERY_FROZEN, &mddev->recovery))
			goto unlock;
		/* no recovery is running.
		 * remove any failed drives, then
		 * add spares if possible.
		 * Spares are also removed and re-added, to allow
		 * the personality to fail the re-add.
		 */

		if (mddev->reshape_position != MaxSector) {
			if (mddev->pers->check_reshape(mddev) != 0)
				/* Cannot proceed */
				goto unlock;
			set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
		} else if ((spares = remove_and_add_spares(mddev))) {
			clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
			clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
		} else if (mddev->recovery_cp < MaxSector) {
			set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
		} else if (!test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
			/* nothing to be done ... */
			goto unlock;

		if (mddev->pers->sync_request) {
			set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
			if (spares && mddev->bitmap && ! mddev->bitmap->file) {
				/* We are adding a device or devices to an array
				 * which has the bitmap stored on all devices.
				 * So make sure all bitmap pages get written
				 */
				bitmap_write_all(mddev->bitmap);
			}
			mddev->sync_thread = md_register_thread(md_do_sync,
								mddev,
								"%s_resync");
			if (!mddev->sync_thread) {
				printk(KERN_ERR "%s: could not start resync"
				       " thread...\n",
				       mdname(mddev));
				/* leave the spares where they are, it shouldn't hurt */
				mddev->recovery = 0;
			} else
				md_wakeup_thread(mddev->sync_thread);
			md_new_event(mddev);
		}
	unlock:
		mddev_unlock(mddev);
	}
}
static int md_notify_reboot(struct notifier_block *this,
			    unsigned long code, void *x)
{
	struct list_head *tmp;
	mddev_t *mddev;

	if ((code == SYS_DOWN) || (code == SYS_HALT) || (code == SYS_POWER_OFF)) {

		printk(KERN_INFO "md: stopping all md devices.\n");

		for_each_mddev(mddev, tmp)
			if (mddev_trylock(mddev)) {
				do_md_stop (mddev, 1);
				mddev_unlock(mddev);
			}
		/*
		 * certain more exotic SCSI devices are known to be
		 * volatile wrt too early system reboots. While the
		 * right place to handle this issue is the given
		 * driver, we do want to have a safe RAID driver ...
		 */
		mdelay(1000*1);
	}
	return NOTIFY_DONE;
}
static struct notifier_block md_notifier = {
	.notifier_call	= md_notify_reboot,
	.next		= NULL,
	.priority	= INT_MAX, /* before any real devices */
};
static void md_geninit(void)
{
	dprintk("md: sizeof(mdp_super_t) = %d\n", (int)sizeof(mdp_super_t));

	proc_create("mdstat", S_IRUGO, NULL, &md_seq_fops);
}
static int __init md_init(void)
{
	if (register_blkdev(MAJOR_NR, "md"))
		return -1;
	if ((mdp_major=register_blkdev(0, "mdp"))<=0) {
		unregister_blkdev(MAJOR_NR, "md");
		return -1;
	}
	blk_register_region(MKDEV(MAJOR_NR, 0), 1UL<<MINORBITS, THIS_MODULE,
			    md_probe, NULL, NULL);
	blk_register_region(MKDEV(mdp_major, 0), 1UL<<MINORBITS, THIS_MODULE,
			    md_probe, NULL, NULL);

	register_reboot_notifier(&md_notifier);
	raid_table_header = register_sysctl_table(raid_root_table);

	md_geninit();
	return (0);
}
#ifndef MODULE

/*
 * Searches all registered partitions for autorun RAID arrays
 * at boot time.
 */

static LIST_HEAD(all_detected_devices);
struct detected_devices_node {
	struct list_head list;
	dev_t dev;
};

void md_autodetect_dev(dev_t dev)
{
	struct detected_devices_node *node_detected_dev;

	node_detected_dev = kzalloc(sizeof(*node_detected_dev), GFP_KERNEL);
	if (node_detected_dev) {
		node_detected_dev->dev = dev;
		list_add_tail(&node_detected_dev->list, &all_detected_devices);
	} else {
		printk(KERN_CRIT "md: md_autodetect_dev: kzalloc failed"
			", skipping dev(%d,%d)\n", MAJOR(dev), MINOR(dev));
	}
}
static void autostart_arrays(int part)
{
	mdk_rdev_t *rdev;
	struct detected_devices_node *node_detected_dev;
	dev_t dev;
	int i_scanned, i_passed;

	i_scanned = 0;
	i_passed = 0;

	printk(KERN_INFO "md: Autodetecting RAID arrays.\n");

	while (!list_empty(&all_detected_devices) && i_scanned < INT_MAX) {
		i_scanned++;
		node_detected_dev = list_entry(all_detected_devices.next,
					struct detected_devices_node, list);
		list_del(&node_detected_dev->list);
		dev = node_detected_dev->dev;
		kfree(node_detected_dev);
		rdev = md_import_device(dev,0, 90);
		if (IS_ERR(rdev))
			continue;

		if (test_bit(Faulty, &rdev->flags)) {
			MD_BUG();
			continue;
		}
		set_bit(AutoDetected, &rdev->flags);
		list_add(&rdev->same_set, &pending_raid_disks);
		i_passed++;
	}

	printk(KERN_INFO "md: Scanned %d and added %d devices.\n",
					i_scanned, i_passed);

	autorun_devices(part);
}

#endif /* !MODULE */
static __exit void md_exit(void)
{
	mddev_t *mddev;
	struct list_head *tmp;

	blk_unregister_region(MKDEV(MAJOR_NR,0), 1U << MINORBITS);
	blk_unregister_region(MKDEV(mdp_major,0), 1U << MINORBITS);

	unregister_blkdev(MAJOR_NR,"md");
	unregister_blkdev(mdp_major, "mdp");
	unregister_reboot_notifier(&md_notifier);
	unregister_sysctl_table(raid_table_header);
	remove_proc_entry("mdstat", NULL);
	for_each_mddev(mddev, tmp) {
		struct gendisk *disk = mddev->gendisk;
		if (!disk)
			continue;
		export_array(mddev);
		del_gendisk(disk);
		put_disk(disk);
		mddev->gendisk = NULL;
		mddev_put(mddev);
	}
}
subsys_initcall(md_init);
module_exit(md_exit)
static int get_ro(char *buffer, struct kernel_param *kp)
{
	return sprintf(buffer, "%d", start_readonly);
}
static int set_ro(const char *val, struct kernel_param *kp)
{
	char *e;
	int num = simple_strtoul(val, &e, 10);
	if (*val && (*e == '\0' || *e == '\n')) {
		start_readonly = num;
		return 0;
	}
	return -EINVAL;
}

module_param_call(start_ro, set_ro, get_ro, NULL, S_IRUSR|S_IWUSR);
module_param(start_dirty_degraded, int, S_IRUGO|S_IWUSR);
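/*
 * Illustrative only (added): both knobs are plain module parameters, so
 * assuming the usual md_mod module name they can be set at boot
 * (md_mod.start_ro=1 on the kernel command line) or at runtime, e.g.
 *
 *   echo 1 > /sys/module/md_mod/parameters/start_ro
 *
 * after which newly started arrays come up in the auto-read-only state
 * shown in /proc/mdstat until the first write arrives.
 */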
EXPORT_SYMBOL(register_md_personality);
EXPORT_SYMBOL(unregister_md_personality);
EXPORT_SYMBOL(md_error);
EXPORT_SYMBOL(md_done_sync);
EXPORT_SYMBOL(md_write_start);
EXPORT_SYMBOL(md_write_end);
EXPORT_SYMBOL(md_register_thread);
EXPORT_SYMBOL(md_unregister_thread);
EXPORT_SYMBOL(md_wakeup_thread);
EXPORT_SYMBOL(md_check_recovery);
MODULE_LICENSE("GPL");
MODULE_ALIAS("md");
MODULE_ALIAS_BLOCKDEV_MAJOR(MD_MAJOR);