1 /*
2    md.c : Multiple Devices driver for Linux
3           Copyright (C) 1998, 1999, 2000 Ingo Molnar
4
5      completely rewritten, based on the MD driver code from Marc Zyngier
6
7    Changes:
8
9    - RAID-1/RAID-5 extensions by Miguel de Icaza, Gadi Oxman, Ingo Molnar
10    - RAID-6 extensions by H. Peter Anvin <hpa@zytor.com>
11    - boot support for linear and striped mode by Harald Hoyer <HarryH@Royal.Net>
12    - kerneld support by Boris Tobotras <boris@xtalk.msk.su>
13    - kmod support by: Cyrus Durgin
14    - RAID0 bugfixes: Mark Anthony Lisher <markal@iname.com>
15    - Devfs support by Richard Gooch <rgooch@atnf.csiro.au>
16
17    - lots of fixes and improvements to the RAID1/RAID5 and generic
18      RAID code (such as request based resynchronization):
19
20      Neil Brown <neilb@cse.unsw.edu.au>.
21
22    - persistent bitmap code
23      Copyright (C) 2003-2004, Paul Clements, SteelEye Technology, Inc.
24
25    This program is free software; you can redistribute it and/or modify
26    it under the terms of the GNU General Public License as published by
27    the Free Software Foundation; either version 2, or (at your option)
28    any later version.
29
30    You should have received a copy of the GNU General Public License
31    (for example /usr/src/linux/COPYING); if not, write to the Free
32    Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
33 */
34
35 #include <linux/kthread.h>
36 #include <linux/blkdev.h>
37 #include <linux/sysctl.h>
38 #include <linux/seq_file.h>
39 #include <linux/fs.h>
40 #include <linux/poll.h>
41 #include <linux/ctype.h>
42 #include <linux/string.h>
43 #include <linux/hdreg.h>
44 #include <linux/proc_fs.h>
45 #include <linux/random.h>
46 #include <linux/module.h>
47 #include <linux/reboot.h>
48 #include <linux/file.h>
49 #include <linux/compat.h>
50 #include <linux/delay.h>
51 #include <linux/raid/md_p.h>
52 #include <linux/raid/md_u.h>
53 #include <linux/slab.h>
54 #include "md.h"
55 #include "bitmap.h"
56
57 #ifndef MODULE
58 static void autostart_arrays(int part);
59 #endif
60
61 /* pers_list is a list of registered personalities protected
62  * by pers_lock.
63  * pers_lock also protects accesses to
64  * mddev->thread when the reconfig mutex cannot be held.
65  */
66 static LIST_HEAD(pers_list);
67 static DEFINE_SPINLOCK(pers_lock);
68
69 static void md_print_devices(void);
70
71 static DECLARE_WAIT_QUEUE_HEAD(resync_wait);
72 static struct workqueue_struct *md_wq;
73 static struct workqueue_struct *md_misc_wq;
74
75 static int remove_and_add_spares(struct mddev *mddev,
76                                  struct md_rdev *this);
77
78 #define MD_BUG(x...) do { printk(KERN_ERR "md: bug in file %s, line %d\n", __FILE__, __LINE__); md_print_devices(); } while (0)
79
80 /*
81  * Default number of read corrections we'll attempt on an rdev
82  * before ejecting it from the array. We divide the read error
83  * count by 2 for every hour elapsed between read errors.
84  */
85 #define MD_DEFAULT_MAX_CORRECTED_READ_ERRORS 20
86 /*
87  * Current RAID-1,4,5 parallel reconstruction 'guaranteed speed limit'
88  * is 1000 KB/sec, so the extra system load does not show up that much.
89  * Increase it if you want to have more _guaranteed_ speed. Note that
90  * the RAID driver will use the maximum available bandwidth if the IO
91  * subsystem is idle. There is also an 'absolute maximum' reconstruction
92  * speed limit - in case reconstruction slows down your system despite
93  * idle IO detection.
94  *
95  * you can change it via /proc/sys/dev/raid/speed_limit_min and _max.
96  * or /sys/block/mdX/md/sync_speed_{min,max}
97  */
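/*
 * Example (illustrative values, not defaults): raising the guaranteed
 * floor for every array, or capping a single array, from userspace:
 *
 *      echo 5000   > /proc/sys/dev/raid/speed_limit_min
 *      echo 100000 > /sys/block/md0/md/sync_speed_max
 */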
98
99 static int sysctl_speed_limit_min = 1000;
100 static int sysctl_speed_limit_max = 200000;
101 static inline int speed_min(struct mddev *mddev)
102 {
103         return mddev->sync_speed_min ?
104                 mddev->sync_speed_min : sysctl_speed_limit_min;
105 }
106
107 static inline int speed_max(struct mddev *mddev)
108 {
109         return mddev->sync_speed_max ?
110                 mddev->sync_speed_max : sysctl_speed_limit_max;
111 }
112
113 static struct ctl_table_header *raid_table_header;
114
115 static struct ctl_table raid_table[] = {
116         {
117                 .procname       = "speed_limit_min",
118                 .data           = &sysctl_speed_limit_min,
119                 .maxlen         = sizeof(int),
120                 .mode           = S_IRUGO|S_IWUSR,
121                 .proc_handler   = proc_dointvec,
122         },
123         {
124                 .procname       = "speed_limit_max",
125                 .data           = &sysctl_speed_limit_max,
126                 .maxlen         = sizeof(int),
127                 .mode           = S_IRUGO|S_IWUSR,
128                 .proc_handler   = proc_dointvec,
129         },
130         { }
131 };
132
133 static struct ctl_table raid_dir_table[] = {
134         {
135                 .procname       = "raid",
136                 .maxlen         = 0,
137                 .mode           = S_IRUGO|S_IXUGO,
138                 .child          = raid_table,
139         },
140         { }
141 };
142
143 static struct ctl_table raid_root_table[] = {
144         {
145                 .procname       = "dev",
146                 .maxlen         = 0,
147                 .mode           = 0555,
148                 .child          = raid_dir_table,
149         },
150         {  }
151 };
152
153 static const struct block_device_operations md_fops;
154
155 static int start_readonly;
156
157 /* bio_alloc_mddev / bio_clone_mddev:
158  * like bio_alloc / bio_clone, but using the mddev-local bio set
159  */
160
161 struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs,
162                             struct mddev *mddev)
163 {
164         struct bio *b;
165
166         if (!mddev || !mddev->bio_set)
167                 return bio_alloc(gfp_mask, nr_iovecs);
168
169         b = bio_alloc_bioset(gfp_mask, nr_iovecs, mddev->bio_set);
170         if (!b)
171                 return NULL;
172         return b;
173 }
174 EXPORT_SYMBOL_GPL(bio_alloc_mddev);
175
176 struct bio *bio_clone_mddev(struct bio *bio, gfp_t gfp_mask,
177                             struct mddev *mddev)
178 {
179         if (!mddev || !mddev->bio_set)
180                 return bio_clone(bio, gfp_mask);
181
182         return bio_clone_bioset(bio, gfp_mask, mddev->bio_set);
183 }
184 EXPORT_SYMBOL_GPL(bio_clone_mddev);
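/*
 * Note: drawing bios from the per-mddev bio_set (when one exists)
 * reserves a mempool for the array's own I/O, so forward progress does
 * not depend on the global bio pool under memory pressure.
 */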
185
186 /*
187  * We have a system wide 'event count' that is incremented
188  * on any 'interesting' event, and readers of /proc/mdstat
189  * can use 'poll' or 'select' to find out when the event
190  * count increases.
191  *
192  * Events are:
193  *  start array, stop array, error, add device, remove device,
194  *  start build, activate spare
195  */
196 static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
197 static atomic_t md_event_count;
198 void md_new_event(struct mddev *mddev)
199 {
200         atomic_inc(&md_event_count);
201         wake_up(&md_event_waiters);
202 }
203 EXPORT_SYMBOL_GPL(md_new_event);
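/*
 * Userspace sketch (not part of the driver): a monitor can sleep until
 * the event count changes by polling /proc/mdstat for exceptional
 * events:
 *
 *      int fd = open("/proc/mdstat", O_RDONLY);
 *      struct pollfd pfd = { .fd = fd, .events = POLLPRI };
 *      poll(&pfd, 1, -1);      // returns when md_event_count changes
 */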
204
205 /* Alternate version that can be called from interrupts
206  * when calling sysfs_notify isn't needed.
207  */
208 static void md_new_event_inintr(struct mddev *mddev)
209 {
210         atomic_inc(&md_event_count);
211         wake_up(&md_event_waiters);
212 }
213
214 /*
215  * Enables iteration over all existing md arrays;
216  * all_mddevs_lock protects this list.
217  */
218 static LIST_HEAD(all_mddevs);
219 static DEFINE_SPINLOCK(all_mddevs_lock);
220
221
222 /*
223  * iterates through all used mddevs in the system.
224  * We take care to grab the all_mddevs_lock whenever navigating
225  * the list, and to always hold a refcount when unlocked.
226  * Any code which breaks out of this loop while owning
227  * a reference to the current mddev must mddev_put it.
228  */
229 #define for_each_mddev(_mddev,_tmp)                                     \
230                                                                         \
231         for (({ spin_lock(&all_mddevs_lock);                            \
232                 _tmp = all_mddevs.next;                                 \
233                 _mddev = NULL;});                                       \
234              ({ if (_tmp != &all_mddevs)                                \
235                         mddev_get(list_entry(_tmp, struct mddev, all_mddevs));\
236                 spin_unlock(&all_mddevs_lock);                          \
237                 if (_mddev) mddev_put(_mddev);                          \
238                 _mddev = list_entry(_tmp, struct mddev, all_mddevs);    \
239                 _tmp != &all_mddevs;});                                 \
240              ({ spin_lock(&all_mddevs_lock);                            \
241                 _tmp = _tmp->next;})                                    \
242                 )
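/*
 * Usage sketch: the cursor is a bare list_head pointer, e.g.
 *
 *      struct mddev *mddev;
 *      struct list_head *tmp;
 *
 *      for_each_mddev(mddev, tmp)
 *              pr_info("md: considering %s\n", mdname(mddev));
 */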
243
244
245 /* Rather than calling directly into the personality make_request function,
246  * IO requests come here first so that we can check if the device is
247  * being suspended pending a reconfiguration.
248  * We hold a refcount over the call to ->make_request.  By the time that
249  * call has finished, the bio has been linked into some internal structure
250  * and so is visible to ->quiesce(), so we don't need the refcount any more.
251  */
252 static void md_make_request(struct request_queue *q, struct bio *bio)
253 {
254         const int rw = bio_data_dir(bio);
255         struct mddev *mddev = q->queuedata;
256         int cpu;
257         unsigned int sectors;
258
259         if (mddev == NULL || mddev->pers == NULL
260             || !mddev->ready) {
261                 bio_io_error(bio);
262                 return;
263         }
264         if (mddev->ro == 1 && unlikely(rw == WRITE)) {
265                 bio_endio(bio, bio_sectors(bio) == 0 ? 0 : -EROFS);
266                 return;
267         }
268         smp_rmb(); /* Ensure implications of 'active' are visible */
269         rcu_read_lock();
270         if (mddev->suspended) {
271                 DEFINE_WAIT(__wait);
272                 for (;;) {
273                         prepare_to_wait(&mddev->sb_wait, &__wait,
274                                         TASK_UNINTERRUPTIBLE);
275                         if (!mddev->suspended)
276                                 break;
277                         rcu_read_unlock();
278                         schedule();
279                         rcu_read_lock();
280                 }
281                 finish_wait(&mddev->sb_wait, &__wait);
282         }
283         atomic_inc(&mddev->active_io);
284         rcu_read_unlock();
285
286         /*
287          * save the sectors now since our bio can
288          * go away inside make_request
289          */
290         sectors = bio_sectors(bio);
291         mddev->pers->make_request(mddev, bio);
292
293         cpu = part_stat_lock();
294         part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
295         part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw], sectors);
296         part_stat_unlock();
297
298         if (atomic_dec_and_test(&mddev->active_io) && mddev->suspended)
299                 wake_up(&mddev->sb_wait);
300 }
301
302 /* mddev_suspend makes sure no new requests are submitted
303  * to the device, and that any requests that have been submitted
304  * are completely handled.
305  * Once ->stop is called and completes, the module will be completely
306  * unused.
307  */
308 void mddev_suspend(struct mddev *mddev)
309 {
310         BUG_ON(mddev->suspended);
311         mddev->suspended = 1;
312         synchronize_rcu();
313         wait_event(mddev->sb_wait, atomic_read(&mddev->active_io) == 0);
314         mddev->pers->quiesce(mddev, 1);
315
316         del_timer_sync(&mddev->safemode_timer);
317 }
318 EXPORT_SYMBOL_GPL(mddev_suspend);
319
320 void mddev_resume(struct mddev *mddev)
321 {
322         mddev->suspended = 0;
323         wake_up(&mddev->sb_wait);
324         mddev->pers->quiesce(mddev, 0);
325
326         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
327         md_wakeup_thread(mddev->thread);
328         md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */
329 }
330 EXPORT_SYMBOL_GPL(mddev_resume);
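/*
 * The pair is used to quiesce an array around a reconfiguration
 * (sketch, cf. callers such as the level-changing code):
 *
 *      mddev_suspend(mddev);
 *      ... swap personality / rework internal state ...
 *      mddev_resume(mddev);
 */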
331
332 int mddev_congested(struct mddev *mddev, int bits)
333 {
334         return mddev->suspended;
335 }
336 EXPORT_SYMBOL(mddev_congested);
337
338 /*
339  * Generic flush handling for md
340  */
341
342 static void md_end_flush(struct bio *bio, int err)
343 {
344         struct md_rdev *rdev = bio->bi_private;
345         struct mddev *mddev = rdev->mddev;
346
347         rdev_dec_pending(rdev, mddev);
348
349         if (atomic_dec_and_test(&mddev->flush_pending)) {
350                 /* The pre-request flush has finished */
351                 queue_work(md_wq, &mddev->flush_work);
352         }
353         bio_put(bio);
354 }
355
356 static void md_submit_flush_data(struct work_struct *ws);
357
358 static void submit_flushes(struct work_struct *ws)
359 {
360         struct mddev *mddev = container_of(ws, struct mddev, flush_work);
361         struct md_rdev *rdev;
362
363         INIT_WORK(&mddev->flush_work, md_submit_flush_data);
364         atomic_set(&mddev->flush_pending, 1);
365         rcu_read_lock();
366         rdev_for_each_rcu(rdev, mddev)
367                 if (rdev->raid_disk >= 0 &&
368                     !test_bit(Faulty, &rdev->flags)) {
369                         /* Take two references: one is dropped
370                          * when the request finishes, one after
371                          * we re-take the rcu_read_lock
372                          */
373                         struct bio *bi;
374                         atomic_inc(&rdev->nr_pending);
375                         atomic_inc(&rdev->nr_pending);
376                         rcu_read_unlock();
377                         bi = bio_alloc_mddev(GFP_NOIO, 0, mddev);
378                         bi->bi_end_io = md_end_flush;
379                         bi->bi_private = rdev;
380                         bi->bi_bdev = rdev->bdev;
381                         atomic_inc(&mddev->flush_pending);
382                         submit_bio(WRITE_FLUSH, bi);
383                         rcu_read_lock();
384                         rdev_dec_pending(rdev, mddev);
385                 }
386         rcu_read_unlock();
387         if (atomic_dec_and_test(&mddev->flush_pending))
388                 queue_work(md_wq, &mddev->flush_work);
389 }
390
391 static void md_submit_flush_data(struct work_struct *ws)
392 {
393         struct mddev *mddev = container_of(ws, struct mddev, flush_work);
394         struct bio *bio = mddev->flush_bio;
395
396         if (bio->bi_iter.bi_size == 0)
397                 /* an empty barrier - all done */
398                 bio_endio(bio, 0);
399         else {
400                 bio->bi_rw &= ~REQ_FLUSH;
401                 mddev->pers->make_request(mddev, bio);
402         }
403
404         mddev->flush_bio = NULL;
405         wake_up(&mddev->sb_wait);
406 }
407
408 void md_flush_request(struct mddev *mddev, struct bio *bio)
409 {
410         spin_lock_irq(&mddev->write_lock);
411         wait_event_lock_irq(mddev->sb_wait,
412                             !mddev->flush_bio,
413                             mddev->write_lock);
414         mddev->flush_bio = bio;
415         spin_unlock_irq(&mddev->write_lock);
416
417         INIT_WORK(&mddev->flush_work, submit_flushes);
418         queue_work(md_wq, &mddev->flush_work);
419 }
420 EXPORT_SYMBOL(md_flush_request);
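/*
 * Caller pattern (sketch, cf. the personalities' make_request methods):
 *
 *      if (unlikely(bio->bi_rw & REQ_FLUSH)) {
 *              md_flush_request(mddev, bio);
 *              return;
 *      }
 */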
421
422 void md_unplug(struct blk_plug_cb *cb, bool from_schedule)
423 {
424         struct mddev *mddev = cb->data;
425         md_wakeup_thread(mddev->thread);
426         kfree(cb);
427 }
428 EXPORT_SYMBOL(md_unplug);
429
430 static inline struct mddev *mddev_get(struct mddev *mddev)
431 {
432         atomic_inc(&mddev->active);
433         return mddev;
434 }
435
436 static void mddev_delayed_delete(struct work_struct *ws);
437
438 static void mddev_put(struct mddev *mddev)
439 {
440         struct bio_set *bs = NULL;
441
442         if (!atomic_dec_and_lock(&mddev->active, &all_mddevs_lock))
443                 return;
444         if (!mddev->raid_disks && list_empty(&mddev->disks) &&
445             mddev->ctime == 0 && !mddev->hold_active) {
446                 /* Array is not configured at all, and not held active,
447                  * so destroy it */
448                 list_del_init(&mddev->all_mddevs);
449                 bs = mddev->bio_set;
450                 mddev->bio_set = NULL;
451                 if (mddev->gendisk) {
452                         /* We did a probe so need to clean up.  Call
453                          * queue_work inside the spinlock so that
454                          * flush_workqueue() after mddev_find will
455                          * succeed in waiting for the work to be done.
456                          */
457                         INIT_WORK(&mddev->del_work, mddev_delayed_delete);
458                         queue_work(md_misc_wq, &mddev->del_work);
459                 } else
460                         kfree(mddev);
461         }
462         spin_unlock(&all_mddevs_lock);
463         if (bs)
464                 bioset_free(bs);
465 }
466
467 void mddev_init(struct mddev *mddev)
468 {
469         mutex_init(&mddev->open_mutex);
470         mutex_init(&mddev->reconfig_mutex);
471         mutex_init(&mddev->bitmap_info.mutex);
472         INIT_LIST_HEAD(&mddev->disks);
473         INIT_LIST_HEAD(&mddev->all_mddevs);
474         init_timer(&mddev->safemode_timer);
475         atomic_set(&mddev->active, 1);
476         atomic_set(&mddev->openers, 0);
477         atomic_set(&mddev->active_io, 0);
478         spin_lock_init(&mddev->write_lock);
479         atomic_set(&mddev->flush_pending, 0);
480         init_waitqueue_head(&mddev->sb_wait);
481         init_waitqueue_head(&mddev->recovery_wait);
482         mddev->reshape_position = MaxSector;
483         mddev->reshape_backwards = 0;
484         mddev->last_sync_action = "none";
485         mddev->resync_min = 0;
486         mddev->resync_max = MaxSector;
487         mddev->level = LEVEL_NONE;
488 }
489 EXPORT_SYMBOL_GPL(mddev_init);
490
491 static struct mddev * mddev_find(dev_t unit)
492 {
493         struct mddev *mddev, *new = NULL;
494
495         if (unit && MAJOR(unit) != MD_MAJOR)
496                 unit &= ~((1<<MdpMinorShift)-1);
497
498  retry:
499         spin_lock(&all_mddevs_lock);
500
501         if (unit) {
502                 list_for_each_entry(mddev, &all_mddevs, all_mddevs)
503                         if (mddev->unit == unit) {
504                                 mddev_get(mddev);
505                                 spin_unlock(&all_mddevs_lock);
506                                 kfree(new);
507                                 return mddev;
508                         }
509
510                 if (new) {
511                         list_add(&new->all_mddevs, &all_mddevs);
512                         spin_unlock(&all_mddevs_lock);
513                         new->hold_active = UNTIL_IOCTL;
514                         return new;
515                 }
516         } else if (new) {
517                 /* find an unused unit number */
518                 static int next_minor = 512;
519                 int start = next_minor;
520                 int is_free = 0;
521                 int dev = 0;
522                 while (!is_free) {
523                         dev = MKDEV(MD_MAJOR, next_minor);
524                         next_minor++;
525                         if (next_minor > MINORMASK)
526                                 next_minor = 0;
527                         if (next_minor == start) {
528                                 /* Oh dear, all in use. */
529                                 spin_unlock(&all_mddevs_lock);
530                                 kfree(new);
531                                 return NULL;
532                         }
533
534                         is_free = 1;
535                         list_for_each_entry(mddev, &all_mddevs, all_mddevs)
536                                 if (mddev->unit == dev) {
537                                         is_free = 0;
538                                         break;
539                                 }
540                 }
541                 new->unit = dev;
542                 new->md_minor = MINOR(dev);
543                 new->hold_active = UNTIL_STOP;
544                 list_add(&new->all_mddevs, &all_mddevs);
545                 spin_unlock(&all_mddevs_lock);
546                 return new;
547         }
548         spin_unlock(&all_mddevs_lock);
549
550         new = kzalloc(sizeof(*new), GFP_KERNEL);
551         if (!new)
552                 return NULL;
553
554         new->unit = unit;
555         if (MAJOR(unit) == MD_MAJOR)
556                 new->md_minor = MINOR(unit);
557         else
558                 new->md_minor = MINOR(unit) >> MdpMinorShift;
559
560         mddev_init(new);
561
562         goto retry;
563 }
564
565 static inline int __must_check mddev_lock(struct mddev * mddev)
566 {
567         return mutex_lock_interruptible(&mddev->reconfig_mutex);
568 }
569
570 /* Sometimes we need to take the lock in a situation where
571  * failure due to interrupts is not acceptable.
572  */
573 static inline void mddev_lock_nointr(struct mddev * mddev)
574 {
575         mutex_lock(&mddev->reconfig_mutex);
576 }
577
578 static inline int mddev_is_locked(struct mddev *mddev)
579 {
580         return mutex_is_locked(&mddev->reconfig_mutex);
581 }
582
583 static inline int mddev_trylock(struct mddev * mddev)
584 {
585         return mutex_trylock(&mddev->reconfig_mutex);
586 }
587
588 static struct attribute_group md_redundancy_group;
589
590 static void mddev_unlock(struct mddev * mddev)
591 {
592         if (mddev->to_remove) {
593                 /* These cannot be removed under reconfig_mutex as
594                  * an access to the files will try to take reconfig_mutex
595                  * while holding the file unremovable, which leads to
596                  * a deadlock.
597          * So we set sysfs_active while the removal is happening,
598          * and anything else which might set ->to_remove or may
599                  * otherwise change the sysfs namespace will fail with
600                  * -EBUSY if sysfs_active is still set.
601                  * We set sysfs_active under reconfig_mutex and elsewhere
602                  * test it under the same mutex to ensure its correct value
603                  * is seen.
604                  */
605                 struct attribute_group *to_remove = mddev->to_remove;
606                 mddev->to_remove = NULL;
607                 mddev->sysfs_active = 1;
608                 mutex_unlock(&mddev->reconfig_mutex);
609
610                 if (mddev->kobj.sd) {
611                         if (to_remove != &md_redundancy_group)
612                                 sysfs_remove_group(&mddev->kobj, to_remove);
613                         if (mddev->pers == NULL ||
614                             mddev->pers->sync_request == NULL) {
615                                 sysfs_remove_group(&mddev->kobj, &md_redundancy_group);
616                                 if (mddev->sysfs_action)
617                                         sysfs_put(mddev->sysfs_action);
618                                 mddev->sysfs_action = NULL;
619                         }
620                 }
621                 mddev->sysfs_active = 0;
622         } else
623                 mutex_unlock(&mddev->reconfig_mutex);
624
625         /* As we've dropped the mutex we need a spinlock to
626          * make sure the thread doesn't disappear
627          */
628         spin_lock(&pers_lock);
629         md_wakeup_thread(mddev->thread);
630         spin_unlock(&pers_lock);
631 }
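/*
 * Locking sketch (cf. the sysfs attribute store methods): take the
 * reconfig mutex, fail if interrupted, and always drop it through
 * mddev_unlock() so pending sysfs removals are processed:
 *
 *      if (mddev_lock(mddev))
 *              return -EINTR;
 *      ... change configuration ...
 *      mddev_unlock(mddev);
 */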
632
633 static struct md_rdev * find_rdev_nr(struct mddev *mddev, int nr)
634 {
635         struct md_rdev *rdev;
636
637         rdev_for_each(rdev, mddev)
638                 if (rdev->desc_nr == nr)
639                         return rdev;
640
641         return NULL;
642 }
643
644 static struct md_rdev *find_rdev_nr_rcu(struct mddev *mddev, int nr)
645 {
646         struct md_rdev *rdev;
647
648         rdev_for_each_rcu(rdev, mddev)
649                 if (rdev->desc_nr == nr)
650                         return rdev;
651
652         return NULL;
653 }
654
655 static struct md_rdev *find_rdev(struct mddev *mddev, dev_t dev)
656 {
657         struct md_rdev *rdev;
658
659         rdev_for_each(rdev, mddev)
660                 if (rdev->bdev->bd_dev == dev)
661                         return rdev;
662
663         return NULL;
664 }
665
666 static struct md_rdev *find_rdev_rcu(struct mddev *mddev, dev_t dev)
667 {
668         struct md_rdev *rdev;
669
670         rdev_for_each_rcu(rdev, mddev)
671                 if (rdev->bdev->bd_dev == dev)
672                         return rdev;
673
674         return NULL;
675 }
676
677 static struct md_personality *find_pers(int level, char *clevel)
678 {
679         struct md_personality *pers;
680         list_for_each_entry(pers, &pers_list, list) {
681                 if (level != LEVEL_NONE && pers->level == level)
682                         return pers;
683                 if (strcmp(pers->name, clevel)==0)
684                         return pers;
685         }
686         return NULL;
687 }
688
689 /* return the offset of the super block in 512byte sectors */
690 static inline sector_t calc_dev_sboffset(struct md_rdev *rdev)
691 {
692         sector_t num_sectors = i_size_read(rdev->bdev->bd_inode) / 512;
693         return MD_NEW_SIZE_SECTORS(num_sectors);
694 }
695
696 static int alloc_disk_sb(struct md_rdev * rdev)
697 {
698         if (rdev->sb_page)
699                 MD_BUG();
700
701         rdev->sb_page = alloc_page(GFP_KERNEL);
702         if (!rdev->sb_page) {
703                 printk(KERN_ALERT "md: out of memory.\n");
704                 return -ENOMEM;
705         }
706
707         return 0;
708 }
709
710 void md_rdev_clear(struct md_rdev *rdev)
711 {
712         if (rdev->sb_page) {
713                 put_page(rdev->sb_page);
714                 rdev->sb_loaded = 0;
715                 rdev->sb_page = NULL;
716                 rdev->sb_start = 0;
717                 rdev->sectors = 0;
718         }
719         if (rdev->bb_page) {
720                 put_page(rdev->bb_page);
721                 rdev->bb_page = NULL;
722         }
723         kfree(rdev->badblocks.page);
724         rdev->badblocks.page = NULL;
725 }
726 EXPORT_SYMBOL_GPL(md_rdev_clear);
727
728 static void super_written(struct bio *bio, int error)
729 {
730         struct md_rdev *rdev = bio->bi_private;
731         struct mddev *mddev = rdev->mddev;
732
733         if (error || !test_bit(BIO_UPTODATE, &bio->bi_flags)) {
734                 printk("md: super_written gets error=%d, uptodate=%d\n",
735                        error, test_bit(BIO_UPTODATE, &bio->bi_flags));
736                 WARN_ON(test_bit(BIO_UPTODATE, &bio->bi_flags));
737                 md_error(mddev, rdev);
738         }
739
740         if (atomic_dec_and_test(&mddev->pending_writes))
741                 wake_up(&mddev->sb_wait);
742         bio_put(bio);
743 }
744
745 void md_super_write(struct mddev *mddev, struct md_rdev *rdev,
746                    sector_t sector, int size, struct page *page)
747 {
748         /* write first size bytes of page to sector of rdev
749          * Increment mddev->pending_writes before returning
750          * and decrement it on completion, waking up sb_wait
751          * if zero is reached.
752          * If an error occurred, call md_error
753          */
754         struct bio *bio = bio_alloc_mddev(GFP_NOIO, 1, mddev);
755
756         bio->bi_bdev = rdev->meta_bdev ? rdev->meta_bdev : rdev->bdev;
757         bio->bi_iter.bi_sector = sector;
758         bio_add_page(bio, page, size, 0);
759         bio->bi_private = rdev;
760         bio->bi_end_io = super_written;
761
762         atomic_inc(&mddev->pending_writes);
763         submit_bio(WRITE_FLUSH_FUA, bio);
764 }
765
766 void md_super_wait(struct mddev *mddev)
767 {
768         /* wait for all superblock writes that were scheduled to complete */
769         DEFINE_WAIT(wq);
770         for(;;) {
771                 prepare_to_wait(&mddev->sb_wait, &wq, TASK_UNINTERRUPTIBLE);
772                 if (atomic_read(&mddev->pending_writes)==0)
773                         break;
774                 schedule();
775         }
776         finish_wait(&mddev->sb_wait, &wq);
777 }
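/*
 * The two are typically paired (sketch, cf. super_90_rdev_size_change
 * below): queue the superblock write, then wait for completion:
 *
 *      md_super_write(mddev, rdev, rdev->sb_start, rdev->sb_size,
 *                     rdev->sb_page);
 *      md_super_wait(mddev);
 */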
778
779 int sync_page_io(struct md_rdev *rdev, sector_t sector, int size,
780                  struct page *page, int rw, bool metadata_op)
781 {
782         struct bio *bio = bio_alloc_mddev(GFP_NOIO, 1, rdev->mddev);
783         int ret;
784
785         bio->bi_bdev = (metadata_op && rdev->meta_bdev) ?
786                 rdev->meta_bdev : rdev->bdev;
787         if (metadata_op)
788                 bio->bi_iter.bi_sector = sector + rdev->sb_start;
789         else if (rdev->mddev->reshape_position != MaxSector &&
790                  (rdev->mddev->reshape_backwards ==
791                   (sector >= rdev->mddev->reshape_position)))
792                 bio->bi_iter.bi_sector = sector + rdev->new_data_offset;
793         else
794                 bio->bi_iter.bi_sector = sector + rdev->data_offset;
795         bio_add_page(bio, page, size, 0);
796         submit_bio_wait(rw, bio);
797
798         ret = test_bit(BIO_UPTODATE, &bio->bi_flags);
799         bio_put(bio);
800         return ret;
801 }
802 EXPORT_SYMBOL_GPL(sync_page_io);
803
804 static int read_disk_sb(struct md_rdev * rdev, int size)
805 {
806         char b[BDEVNAME_SIZE];
807         if (!rdev->sb_page) {
808                 MD_BUG();
809                 return -EINVAL;
810         }
811         if (rdev->sb_loaded)
812                 return 0;
813
814
815         if (!sync_page_io(rdev, 0, size, rdev->sb_page, READ, true))
816                 goto fail;
817         rdev->sb_loaded = 1;
818         return 0;
819
820 fail:
821         printk(KERN_WARNING "md: disabled device %s, could not read superblock.\n",
822                 bdevname(rdev->bdev,b));
823         return -EINVAL;
824 }
825
826 static int uuid_equal(mdp_super_t *sb1, mdp_super_t *sb2)
827 {
828         return  sb1->set_uuid0 == sb2->set_uuid0 &&
829                 sb1->set_uuid1 == sb2->set_uuid1 &&
830                 sb1->set_uuid2 == sb2->set_uuid2 &&
831                 sb1->set_uuid3 == sb2->set_uuid3;
832 }
833
834 static int sb_equal(mdp_super_t *sb1, mdp_super_t *sb2)
835 {
836         int ret;
837         mdp_super_t *tmp1, *tmp2;
838
839         tmp1 = kmalloc(sizeof(*tmp1),GFP_KERNEL);
840         tmp2 = kmalloc(sizeof(*tmp2),GFP_KERNEL);
841
842         if (!tmp1 || !tmp2) {
843                 ret = 0;
844                 printk(KERN_INFO "md.c sb_equal(): failed to allocate memory!\n");
845                 goto abort;
846         }
847
848         *tmp1 = *sb1;
849         *tmp2 = *sb2;
850
851         /*
852          * nr_disks is not constant
853          */
854         tmp1->nr_disks = 0;
855         tmp2->nr_disks = 0;
856
857         ret = (memcmp(tmp1, tmp2, MD_SB_GENERIC_CONSTANT_WORDS * 4) == 0);
858 abort:
859         kfree(tmp1);
860         kfree(tmp2);
861         return ret;
862 }
863
864
865 static u32 md_csum_fold(u32 csum)
866 {
867         csum = (csum & 0xffff) + (csum >> 16);
868         return (csum & 0xffff) + (csum >> 16);
869 }
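/*
 * Worked example: 0x12345678 folds to 0x1234 + 0x5678 = 0x68ac; the
 * second round only matters when the first addition carries into the
 * upper 16 bits.
 */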
870
871 static unsigned int calc_sb_csum(mdp_super_t * sb)
872 {
873         u64 newcsum = 0;
874         u32 *sb32 = (u32*)sb;
875         int i;
876         unsigned int disk_csum, csum;
877
878         disk_csum = sb->sb_csum;
879         sb->sb_csum = 0;
880
881         for (i = 0; i < MD_SB_BYTES/4 ; i++)
882                 newcsum += sb32[i];
883         csum = (newcsum & 0xffffffff) + (newcsum>>32);
884
885
886 #ifdef CONFIG_ALPHA
887         /* This used to use csum_partial, which was wrong for several
888          * reasons including that different results are returned on
889          * different architectures.  It isn't critical that we get exactly
890          * the same return value as before (we always csum_fold before
891          * testing, and that removes any differences).  However as we
892          * know that csum_partial always returned a 16bit value on
893          * alphas, do a fold to maximise conformity to previous behaviour.
894          */
895         sb->sb_csum = md_csum_fold(disk_csum);
896 #else
897         sb->sb_csum = disk_csum;
898 #endif
899         return csum;
900 }
901
902
903 /*
904  * Handle superblock details.
905  * We want to be able to handle multiple superblock formats
906  * so we have a common interface to them all, and an array of
907  * different handlers.
908  * We rely on user-space to write the initial superblock, and support
909  * reading and updating of superblocks.
910  * Interface methods are:
911  *   int load_super(struct md_rdev *dev, struct md_rdev *refdev, int minor_version)
912  *      loads and validates a superblock on dev.
913  *      if refdev != NULL, compare superblocks on both devices
914  *    Return:
915  *      0 - dev has a superblock that is compatible with refdev
916  *      1 - dev has a superblock that is compatible and newer than refdev
917  *          so dev should be used as the refdev in future
918  *     -EINVAL superblock incompatible or invalid
919  *     -othererror e.g. -EIO
920  *
921  *   int validate_super(struct mddev *mddev, struct md_rdev *dev)
922  *      Verify that dev is acceptable into mddev.
923  *       The first time, mddev->raid_disks will be 0, and data from
924  *       dev should be merged in.  Subsequent calls check that dev
925  *       is new enough.  Return 0 or -EINVAL
926  *
927  *   void sync_super(struct mddev *mddev, struct md_rdev *dev)
928  *     Update the superblock for rdev with data in mddev
929  *     This does not write to disc.
930  *
931  */
932
933 struct super_type  {
934         char                *name;
935         struct module       *owner;
936         int                 (*load_super)(struct md_rdev *rdev,
937                                           struct md_rdev *refdev,
938                                           int minor_version);
939         int                 (*validate_super)(struct mddev *mddev,
940                                               struct md_rdev *rdev);
941         void                (*sync_super)(struct mddev *mddev,
942                                           struct md_rdev *rdev);
943         unsigned long long  (*rdev_size_change)(struct md_rdev *rdev,
944                                                 sector_t num_sectors);
945         int                 (*allow_new_offset)(struct md_rdev *rdev,
946                                                 unsigned long long new_offset);
947 };
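/*
 * The handlers are gathered in a table indexed by major version
 * (sketch; the full table also wires up rdev_size_change and
 * allow_new_offset):
 *
 *      static struct super_type super_types[] = {
 *              [0] = {
 *                      .name           = "0.90.0",
 *                      .owner          = THIS_MODULE,
 *                      .load_super     = super_90_load,
 *                      .validate_super = super_90_validate,
 *                      .sync_super     = super_90_sync,
 *              },
 *              ...
 *      };
 */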
948
949 /*
950  * Check that the given mddev has no bitmap.
951  *
952  * This function is called from the run method of all personalities that do not
953  * support bitmaps. It prints an error message and returns non-zero if mddev
954  * has a bitmap. Otherwise, it returns 0.
955  *
956  */
957 int md_check_no_bitmap(struct mddev *mddev)
958 {
959         if (!mddev->bitmap_info.file && !mddev->bitmap_info.offset)
960                 return 0;
961         printk(KERN_ERR "%s: bitmaps are not supported for %s\n",
962                 mdname(mddev), mddev->pers->name);
963         return 1;
964 }
965 EXPORT_SYMBOL(md_check_no_bitmap);
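/*
 * Usage sketch (cf. the run methods of bitmap-less personalities):
 *
 *      if (md_check_no_bitmap(mddev))
 *              return -EINVAL;
 */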
966
967 /*
968  * load_super for 0.90.0 
969  */
970 static int super_90_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_version)
971 {
972         char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
973         mdp_super_t *sb;
974         int ret;
975
976         /*
977          * Calculate the position of the superblock (512byte sectors),
978          * it's at the end of the disk.
979          *
980          * It also happens to be a multiple of 4Kb.
981          */
982         rdev->sb_start = calc_dev_sboffset(rdev);
983
984         ret = read_disk_sb(rdev, MD_SB_BYTES);
985         if (ret) return ret;
986
987         ret = -EINVAL;
988
989         bdevname(rdev->bdev, b);
990         sb = page_address(rdev->sb_page);
991
992         if (sb->md_magic != MD_SB_MAGIC) {
993                 printk(KERN_ERR "md: invalid raid superblock magic on %s\n",
994                        b);
995                 goto abort;
996         }
997
998         if (sb->major_version != 0 ||
999             sb->minor_version < 90 ||
1000             sb->minor_version > 91) {
1001                 printk(KERN_WARNING "Bad version number %d.%d on %s\n",
1002                         sb->major_version, sb->minor_version,
1003                         b);
1004                 goto abort;
1005         }
1006
1007         if (sb->raid_disks <= 0)
1008                 goto abort;
1009
1010         if (md_csum_fold(calc_sb_csum(sb)) != md_csum_fold(sb->sb_csum)) {
1011                 printk(KERN_WARNING "md: invalid superblock checksum on %s\n",
1012                         b);
1013                 goto abort;
1014         }
1015
1016         rdev->preferred_minor = sb->md_minor;
1017         rdev->data_offset = 0;
1018         rdev->new_data_offset = 0;
1019         rdev->sb_size = MD_SB_BYTES;
1020         rdev->badblocks.shift = -1;
1021
1022         if (sb->level == LEVEL_MULTIPATH)
1023                 rdev->desc_nr = -1;
1024         else
1025                 rdev->desc_nr = sb->this_disk.number;
1026
1027         if (!refdev) {
1028                 ret = 1;
1029         } else {
1030                 __u64 ev1, ev2;
1031                 mdp_super_t *refsb = page_address(refdev->sb_page);
1032                 if (!uuid_equal(refsb, sb)) {
1033                         printk(KERN_WARNING "md: %s has different UUID to %s\n",
1034                                 b, bdevname(refdev->bdev,b2));
1035                         goto abort;
1036                 }
1037                 if (!sb_equal(refsb, sb)) {
1038                         printk(KERN_WARNING "md: %s has same UUID"
1039                                " but different superblock to %s\n",
1040                                b, bdevname(refdev->bdev, b2));
1041                         goto abort;
1042                 }
1043                 ev1 = md_event(sb);
1044                 ev2 = md_event(refsb);
1045                 if (ev1 > ev2)
1046                         ret = 1;
1047                 else 
1048                         ret = 0;
1049         }
1050         rdev->sectors = rdev->sb_start;
1051         /* Limit to 4TB as metadata cannot record more than that.
1052          * (not needed for Linear and RAID0 as metadata doesn't
1053          * record this size)
1054          */
1055         if (rdev->sectors >= (2ULL << 32) && sb->level >= 1)
1056                 rdev->sectors = (2ULL << 32) - 2;
1057
1058         if (rdev->sectors < ((sector_t)sb->size) * 2 && sb->level >= 1)
1059                 /* "this cannot possibly happen" ... */
1060                 ret = -EINVAL;
1061
1062  abort:
1063         return ret;
1064 }
1065
1066 /*
1067  * validate_super for 0.90.0
1068  */
1069 static int super_90_validate(struct mddev *mddev, struct md_rdev *rdev)
1070 {
1071         mdp_disk_t *desc;
1072         mdp_super_t *sb = page_address(rdev->sb_page);
1073         __u64 ev1 = md_event(sb);
1074
1075         rdev->raid_disk = -1;
1076         clear_bit(Faulty, &rdev->flags);
1077         clear_bit(In_sync, &rdev->flags);
1078         clear_bit(Bitmap_sync, &rdev->flags);
1079         clear_bit(WriteMostly, &rdev->flags);
1080
1081         if (mddev->raid_disks == 0) {
1082                 mddev->major_version = 0;
1083                 mddev->minor_version = sb->minor_version;
1084                 mddev->patch_version = sb->patch_version;
1085                 mddev->external = 0;
1086                 mddev->chunk_sectors = sb->chunk_size >> 9;
1087                 mddev->ctime = sb->ctime;
1088                 mddev->utime = sb->utime;
1089                 mddev->level = sb->level;
1090                 mddev->clevel[0] = 0;
1091                 mddev->layout = sb->layout;
1092                 mddev->raid_disks = sb->raid_disks;
1093                 mddev->dev_sectors = ((sector_t)sb->size) * 2;
1094                 mddev->events = ev1;
1095                 mddev->bitmap_info.offset = 0;
1096                 mddev->bitmap_info.space = 0;
1097                 /* bitmap can use 60 K after the 4K superblocks */
1098                 mddev->bitmap_info.default_offset = MD_SB_BYTES >> 9;
1099                 mddev->bitmap_info.default_space = 64*2 - (MD_SB_BYTES >> 9);
1100                 mddev->reshape_backwards = 0;
1101
1102                 if (mddev->minor_version >= 91) {
1103                         mddev->reshape_position = sb->reshape_position;
1104                         mddev->delta_disks = sb->delta_disks;
1105                         mddev->new_level = sb->new_level;
1106                         mddev->new_layout = sb->new_layout;
1107                         mddev->new_chunk_sectors = sb->new_chunk >> 9;
1108                         if (mddev->delta_disks < 0)
1109                                 mddev->reshape_backwards = 1;
1110                 } else {
1111                         mddev->reshape_position = MaxSector;
1112                         mddev->delta_disks = 0;
1113                         mddev->new_level = mddev->level;
1114                         mddev->new_layout = mddev->layout;
1115                         mddev->new_chunk_sectors = mddev->chunk_sectors;
1116                 }
1117
1118                 if (sb->state & (1<<MD_SB_CLEAN))
1119                         mddev->recovery_cp = MaxSector;
1120                 else {
1121                         if (sb->events_hi == sb->cp_events_hi && 
1122                                 sb->events_lo == sb->cp_events_lo) {
1123                                 mddev->recovery_cp = sb->recovery_cp;
1124                         } else
1125                                 mddev->recovery_cp = 0;
1126                 }
1127
1128                 memcpy(mddev->uuid+0, &sb->set_uuid0, 4);
1129                 memcpy(mddev->uuid+4, &sb->set_uuid1, 4);
1130                 memcpy(mddev->uuid+8, &sb->set_uuid2, 4);
1131                 memcpy(mddev->uuid+12,&sb->set_uuid3, 4);
1132
1133                 mddev->max_disks = MD_SB_DISKS;
1134
1135                 if (sb->state & (1<<MD_SB_BITMAP_PRESENT) &&
1136                     mddev->bitmap_info.file == NULL) {
1137                         mddev->bitmap_info.offset =
1138                                 mddev->bitmap_info.default_offset;
1139                         mddev->bitmap_info.space =
1140                                 mddev->bitmap_info.default_space;
1141                 }
1142
1143         } else if (mddev->pers == NULL) {
1144                 /* Insist on good event counter while assembling, except
1145                  * for spares (which don't need an event count) */
1146                 ++ev1;
1147                 if (sb->disks[rdev->desc_nr].state & (
1148                             (1<<MD_DISK_SYNC) | (1 << MD_DISK_ACTIVE)))
1149                         if (ev1 < mddev->events) 
1150                                 return -EINVAL;
1151         } else if (mddev->bitmap) {
1152                 /* if adding to array with a bitmap, then we can accept an
1153                  * older device ... but not too old.
1154                  */
1155                 if (ev1 < mddev->bitmap->events_cleared)
1156                         return 0;
1157                 if (ev1 < mddev->events)
1158                         set_bit(Bitmap_sync, &rdev->flags);
1159         } else {
1160                 if (ev1 < mddev->events)
1161                         /* just a hot-add of a new device, leave raid_disk at -1 */
1162                         return 0;
1163         }
1164
1165         if (mddev->level != LEVEL_MULTIPATH) {
1166                 desc = sb->disks + rdev->desc_nr;
1167
1168                 if (desc->state & (1<<MD_DISK_FAULTY))
1169                         set_bit(Faulty, &rdev->flags);
1170                 else if (desc->state & (1<<MD_DISK_SYNC) /* &&
1171                             desc->raid_disk < mddev->raid_disks */) {
1172                         set_bit(In_sync, &rdev->flags);
1173                         rdev->raid_disk = desc->raid_disk;
1174                         rdev->saved_raid_disk = desc->raid_disk;
1175                 } else if (desc->state & (1<<MD_DISK_ACTIVE)) {
1176                         /* active but not in sync implies recovery up to
1177                          * reshape position.  We don't know exactly where
1178                          * that is, so set to zero for now */
1179                         if (mddev->minor_version >= 91) {
1180                                 rdev->recovery_offset = 0;
1181                                 rdev->raid_disk = desc->raid_disk;
1182                         }
1183                 }
1184                 if (desc->state & (1<<MD_DISK_WRITEMOSTLY))
1185                         set_bit(WriteMostly, &rdev->flags);
1186         } else /* MULTIPATH are always insync */
1187                 set_bit(In_sync, &rdev->flags);
1188         return 0;
1189 }
1190
1191 /*
1192  * sync_super for 0.90.0
1193  */
1194 static void super_90_sync(struct mddev *mddev, struct md_rdev *rdev)
1195 {
1196         mdp_super_t *sb;
1197         struct md_rdev *rdev2;
1198         int next_spare = mddev->raid_disks;
1199
1200
1201         /* make rdev->sb match mddev data..
1202          *
1203          * 1/ zero out disks
1204          * 2/ Add info for each disk, keeping track of highest desc_nr (next_spare);
1205          * 3/ any empty disks < next_spare become removed
1206          *
1207          * disks[0] gets initialised to REMOVED because
1208          * we cannot be sure from other fields if it has
1209          * been initialised or not.
1210          */
1211         int i;
1212         int active=0, working=0,failed=0,spare=0,nr_disks=0;
1213
1214         rdev->sb_size = MD_SB_BYTES;
1215
1216         sb = page_address(rdev->sb_page);
1217
1218         memset(sb, 0, sizeof(*sb));
1219
1220         sb->md_magic = MD_SB_MAGIC;
1221         sb->major_version = mddev->major_version;
1222         sb->patch_version = mddev->patch_version;
1223         sb->gvalid_words  = 0; /* ignored */
1224         memcpy(&sb->set_uuid0, mddev->uuid+0, 4);
1225         memcpy(&sb->set_uuid1, mddev->uuid+4, 4);
1226         memcpy(&sb->set_uuid2, mddev->uuid+8, 4);
1227         memcpy(&sb->set_uuid3, mddev->uuid+12,4);
1228
1229         sb->ctime = mddev->ctime;
1230         sb->level = mddev->level;
1231         sb->size = mddev->dev_sectors / 2;
1232         sb->raid_disks = mddev->raid_disks;
1233         sb->md_minor = mddev->md_minor;
1234         sb->not_persistent = 0;
1235         sb->utime = mddev->utime;
1236         sb->state = 0;
1237         sb->events_hi = (mddev->events>>32);
1238         sb->events_lo = (u32)mddev->events;
1239
1240         if (mddev->reshape_position == MaxSector)
1241                 sb->minor_version = 90;
1242         else {
1243                 sb->minor_version = 91;
1244                 sb->reshape_position = mddev->reshape_position;
1245                 sb->new_level = mddev->new_level;
1246                 sb->delta_disks = mddev->delta_disks;
1247                 sb->new_layout = mddev->new_layout;
1248                 sb->new_chunk = mddev->new_chunk_sectors << 9;
1249         }
1250         mddev->minor_version = sb->minor_version;
1251         if (mddev->in_sync)
1252         {
1253                 sb->recovery_cp = mddev->recovery_cp;
1254                 sb->cp_events_hi = (mddev->events>>32);
1255                 sb->cp_events_lo = (u32)mddev->events;
1256                 if (mddev->recovery_cp == MaxSector)
1257                         sb->state = (1<< MD_SB_CLEAN);
1258         } else
1259                 sb->recovery_cp = 0;
1260
1261         sb->layout = mddev->layout;
1262         sb->chunk_size = mddev->chunk_sectors << 9;
1263
1264         if (mddev->bitmap && mddev->bitmap_info.file == NULL)
1265                 sb->state |= (1<<MD_SB_BITMAP_PRESENT);
1266
1267         sb->disks[0].state = (1<<MD_DISK_REMOVED);
1268         rdev_for_each(rdev2, mddev) {
1269                 mdp_disk_t *d;
1270                 int desc_nr;
1271                 int is_active = test_bit(In_sync, &rdev2->flags);
1272
1273                 if (rdev2->raid_disk >= 0 &&
1274                     sb->minor_version >= 91)
1275                         /* we have nowhere to store the recovery_offset,
1276                          * but if it is not below the reshape_position,
1277                          * we can piggy-back on that.
1278                          */
1279                         is_active = 1;
1280                 if (rdev2->raid_disk < 0 ||
1281                     test_bit(Faulty, &rdev2->flags))
1282                         is_active = 0;
1283                 if (is_active)
1284                         desc_nr = rdev2->raid_disk;
1285                 else
1286                         desc_nr = next_spare++;
1287                 rdev2->desc_nr = desc_nr;
1288                 d = &sb->disks[rdev2->desc_nr];
1289                 nr_disks++;
1290                 d->number = rdev2->desc_nr;
1291                 d->major = MAJOR(rdev2->bdev->bd_dev);
1292                 d->minor = MINOR(rdev2->bdev->bd_dev);
1293                 if (is_active)
1294                         d->raid_disk = rdev2->raid_disk;
1295                 else
1296                         d->raid_disk = rdev2->desc_nr; /* compatibility */
1297                 if (test_bit(Faulty, &rdev2->flags))
1298                         d->state = (1<<MD_DISK_FAULTY);
1299                 else if (is_active) {
1300                         d->state = (1<<MD_DISK_ACTIVE);
1301                         if (test_bit(In_sync, &rdev2->flags))
1302                                 d->state |= (1<<MD_DISK_SYNC);
1303                         active++;
1304                         working++;
1305                 } else {
1306                         d->state = 0;
1307                         spare++;
1308                         working++;
1309                 }
1310                 if (test_bit(WriteMostly, &rdev2->flags))
1311                         d->state |= (1<<MD_DISK_WRITEMOSTLY);
1312         }
1313         /* now set the "removed" and "faulty" bits on any missing devices */
1314         for (i=0 ; i < mddev->raid_disks ; i++) {
1315                 mdp_disk_t *d = &sb->disks[i];
1316                 if (d->state == 0 && d->number == 0) {
1317                         d->number = i;
1318                         d->raid_disk = i;
1319                         d->state = (1<<MD_DISK_REMOVED);
1320                         d->state |= (1<<MD_DISK_FAULTY);
1321                         failed++;
1322                 }
1323         }
1324         sb->nr_disks = nr_disks;
1325         sb->active_disks = active;
1326         sb->working_disks = working;
1327         sb->failed_disks = failed;
1328         sb->spare_disks = spare;
1329
1330         sb->this_disk = sb->disks[rdev->desc_nr];
1331         sb->sb_csum = calc_sb_csum(sb);
1332 }
1333
1334 /*
1335  * rdev_size_change for 0.90.0
1336  */
1337 static unsigned long long
1338 super_90_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors)
1339 {
1340         if (num_sectors && num_sectors < rdev->mddev->dev_sectors)
1341                 return 0; /* component must fit device */
1342         if (rdev->mddev->bitmap_info.offset)
1343                 return 0; /* can't move bitmap */
1344         rdev->sb_start = calc_dev_sboffset(rdev);
1345         if (!num_sectors || num_sectors > rdev->sb_start)
1346                 num_sectors = rdev->sb_start;
1347         /* Limit to 4TB as metadata cannot record more than that.
1348          * 4TB == 2^32 KB, or 2*2^32 sectors.
1349          */
1350         if (num_sectors >= (2ULL << 32) && rdev->mddev->level >= 1)
1351                 num_sectors = (2ULL << 32) - 2;
1352         md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
1353                        rdev->sb_page);
1354         md_super_wait(rdev->mddev);
1355         return num_sectors;
1356 }
1357
1358 static int
1359 super_90_allow_new_offset(struct md_rdev *rdev, unsigned long long new_offset)
1360 {
1361         /* non-zero offset changes not possible with v0.90 */
1362         return new_offset == 0;
1363 }
1364
1365 /*
1366  * version 1 superblock
1367  */
1368
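/*
 * The v1 checksum covers the 256-byte fixed header plus two bytes of
 * dev_roles[] per possible device (hence 256 + max_dev*2), summed as
 * little-endian 32-bit words with a 16-bit tail when the length is not
 * a multiple of four.
 */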
1369 static __le32 calc_sb_1_csum(struct mdp_superblock_1 * sb)
1370 {
1371         __le32 disk_csum;
1372         u32 csum;
1373         unsigned long long newcsum;
1374         int size = 256 + le32_to_cpu(sb->max_dev)*2;
1375         __le32 *isuper = (__le32*)sb;
1376
1377         disk_csum = sb->sb_csum;
1378         sb->sb_csum = 0;
1379         newcsum = 0;
1380         for (; size >= 4; size -= 4)
1381                 newcsum += le32_to_cpu(*isuper++);
1382
1383         if (size == 2)
1384                 newcsum += le16_to_cpu(*(__le16*) isuper);
1385
1386         csum = (newcsum & 0xffffffff) + (newcsum >> 32);
1387         sb->sb_csum = disk_csum;
1388         return cpu_to_le32(csum);
1389 }
1390
1391 static int md_set_badblocks(struct badblocks *bb, sector_t s, int sectors,
1392                             int acknowledged);
1393 static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_version)
1394 {
1395         struct mdp_superblock_1 *sb;
1396         int ret;
1397         sector_t sb_start;
1398         sector_t sectors;
1399         char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
1400         int bmask;
1401
1402         /*
1403          * Calculate the position of the superblock in 512byte sectors.
1404          * It is always aligned to a 4K boundary and
1405          * depending on minor_version, it can be:
1406          * 0: At least 8K, but less than 12K, from end of device
1407          * 1: At start of device
1408          * 2: 4K from start of device.
1409          */
1410         switch(minor_version) {
1411         case 0:
1412                 sb_start = i_size_read(rdev->bdev->bd_inode) >> 9;
1413                 sb_start -= 8*2;
1414                 sb_start &= ~(sector_t)(4*2-1);
1415                 break;
1416         case 1:
1417                 sb_start = 0;
1418                 break;
1419         case 2:
1420                 sb_start = 8;
1421                 break;
1422         default:
1423                 return -EINVAL;
1424         }
1425         rdev->sb_start = sb_start;
1426
1427         /* superblock is rarely larger than 1K, but it can be larger,
1428          * and it is safe to read 4k, so we do that
1429          */
1430         ret = read_disk_sb(rdev, 4096);
1431         if (ret) return ret;
1432
1433
1434         sb = page_address(rdev->sb_page);
1435
1436         if (sb->magic != cpu_to_le32(MD_SB_MAGIC) ||
1437             sb->major_version != cpu_to_le32(1) ||
1438             le32_to_cpu(sb->max_dev) > (4096-256)/2 ||
1439             le64_to_cpu(sb->super_offset) != rdev->sb_start ||
1440             (le32_to_cpu(sb->feature_map) & ~MD_FEATURE_ALL) != 0)
1441                 return -EINVAL;
1442
1443         if (calc_sb_1_csum(sb) != sb->sb_csum) {
1444                 printk("md: invalid superblock checksum on %s\n",
1445                         bdevname(rdev->bdev,b));
1446                 return -EINVAL;
1447         }
1448         if (le64_to_cpu(sb->data_size) < 10) {
1449                 printk("md: data_size too small on %s\n",
1450                        bdevname(rdev->bdev,b));
1451                 return -EINVAL;
1452         }
1453         if (sb->pad0 ||
1454             sb->pad3[0] ||
1455             memcmp(sb->pad3, sb->pad3+1, sizeof(sb->pad3) - sizeof(sb->pad3[1])))
1456                 /* Some padding is non-zero, might be a new feature */
1457                 return -EINVAL;
1458
1459         rdev->preferred_minor = 0xffff;
1460         rdev->data_offset = le64_to_cpu(sb->data_offset);
1461         rdev->new_data_offset = rdev->data_offset;
1462         if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE) &&
1463             (le32_to_cpu(sb->feature_map) & MD_FEATURE_NEW_OFFSET))
1464                 rdev->new_data_offset += (s32)le32_to_cpu(sb->new_offset);
1465         atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
1466
1467         rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
1468         bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
1469         if (rdev->sb_size & bmask)
1470                 rdev->sb_size = (rdev->sb_size | bmask) + 1;
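             /*
              * Illustrative example: with max_dev = 384, sb_size is
              * 384*2 + 256 = 1024 bytes; on a device with 4K logical blocks
              * (bmask = 4095), (sb_size | bmask) + 1 rounds that up to 4096
              * so superblock I/O stays block-aligned.
              */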
1471
1472         if (minor_version
1473             && rdev->data_offset < sb_start + (rdev->sb_size/512))
1474                 return -EINVAL;
1475         if (minor_version
1476             && rdev->new_data_offset < sb_start + (rdev->sb_size/512))
1477                 return -EINVAL;
1478
1479         if (sb->level == cpu_to_le32(LEVEL_MULTIPATH))
1480                 rdev->desc_nr = -1;
1481         else
1482                 rdev->desc_nr = le32_to_cpu(sb->dev_number);
1483
1484         if (!rdev->bb_page) {
1485                 rdev->bb_page = alloc_page(GFP_KERNEL);
1486                 if (!rdev->bb_page)
1487                         return -ENOMEM;
1488         }
1489         if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BAD_BLOCKS) &&
1490             rdev->badblocks.count == 0) {
1491                 /* need to load the bad block list.
1492                  * Currently we limit it to one page.
1493                  */
1494                 s32 offset;
1495                 sector_t bb_sector;
1496                 u64 *bbp;
1497                 int i;
1498                 int sectors = le16_to_cpu(sb->bblog_size);
1499                 if (sectors > (PAGE_SIZE / 512))
1500                         return -EINVAL;
1501                 offset = le32_to_cpu(sb->bblog_offset);
1502                 if (offset == 0)
1503                         return -EINVAL;
1504                 bb_sector = (long long)offset;
1505                 if (!sync_page_io(rdev, bb_sector, sectors << 9,
1506                                   rdev->bb_page, READ, true))
1507                         return -EIO;
1508                 bbp = (u64 *)page_address(rdev->bb_page);
1509                 rdev->badblocks.shift = sb->bblog_shift;
1510                 for (i = 0 ; i < (sectors << (9-3)) ; i++, bbp++) {
1511                         u64 bb = le64_to_cpu(*bbp);
1512                         int count = bb & (0x3ff);
1513                         u64 sector = bb >> 10;
1514                         sector <<= sb->bblog_shift;
1515                         count <<= sb->bblog_shift;
1516                         if (bb + 1 == 0)
1517                                 break;
1518                         if (md_set_badblocks(&rdev->badblocks,
1519                                              sector, count, 1) == 0)
1520                                 return -EINVAL;
1521                 }
1522         } else if (sb->bblog_offset != 0)
1523                 rdev->badblocks.shift = 0;
1524
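     /*
      * For reference, each on-disk bad-block record is a little-endian u64
      * packing the start sector in the top 54 bits and the length in the
      * low 10 bits, both scaled by bblog_shift; e.g. sector 2048 with
      * count 8 and shift 0 encodes as (2048 << 10) | 8 = 0x200008.  An
      * all-ones word marks an unused slot, hence the "bb + 1 == 0"
      * termination test above.
      */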
1525         if (!refdev) {
1526                 ret = 1;
1527         } else {
1528                 __u64 ev1, ev2;
1529                 struct mdp_superblock_1 *refsb = page_address(refdev->sb_page);
1530
1531                 if (memcmp(sb->set_uuid, refsb->set_uuid, 16) != 0 ||
1532                     sb->level != refsb->level ||
1533                     sb->layout != refsb->layout ||
1534                     sb->chunksize != refsb->chunksize) {
1535                         printk(KERN_WARNING "md: %s has a strangely different"
1536                                 " superblock from %s\n",
1537                                 bdevname(rdev->bdev,b),
1538                                 bdevname(refdev->bdev,b2));
1539                         return -EINVAL;
1540                 }
1541                 ev1 = le64_to_cpu(sb->events);
1542                 ev2 = le64_to_cpu(refsb->events);
1543
1544                 if (ev1 > ev2)
1545                         ret = 1;
1546                 else
1547                         ret = 0;
1548         }
1549         if (minor_version) {
1550                 sectors = (i_size_read(rdev->bdev->bd_inode) >> 9);
1551                 sectors -= rdev->data_offset;
1552         } else
1553                 sectors = rdev->sb_start;
1554         if (sectors < le64_to_cpu(sb->data_size))
1555                 return -EINVAL;
1556         rdev->sectors = le64_to_cpu(sb->data_size);
1557         return ret;
1558 }
1559
1560 static int super_1_validate(struct mddev *mddev, struct md_rdev *rdev)
1561 {
1562         struct mdp_superblock_1 *sb = page_address(rdev->sb_page);
1563         __u64 ev1 = le64_to_cpu(sb->events);
1564
1565         rdev->raid_disk = -1;
1566         clear_bit(Faulty, &rdev->flags);
1567         clear_bit(In_sync, &rdev->flags);
1568         clear_bit(Bitmap_sync, &rdev->flags);
1569         clear_bit(WriteMostly, &rdev->flags);
1570
1571         if (mddev->raid_disks == 0) {
1572                 mddev->major_version = 1;
1573                 mddev->patch_version = 0;
1574                 mddev->external = 0;
1575                 mddev->chunk_sectors = le32_to_cpu(sb->chunksize);
1576                 mddev->ctime = le64_to_cpu(sb->ctime) & ((1ULL << 32)-1);
1577                 mddev->utime = le64_to_cpu(sb->utime) & ((1ULL << 32)-1);
1578                 mddev->level = le32_to_cpu(sb->level);
1579                 mddev->clevel[0] = 0;
1580                 mddev->layout = le32_to_cpu(sb->layout);
1581                 mddev->raid_disks = le32_to_cpu(sb->raid_disks);
1582                 mddev->dev_sectors = le64_to_cpu(sb->size);
1583                 mddev->events = ev1;
1584                 mddev->bitmap_info.offset = 0;
1585                 mddev->bitmap_info.space = 0;
1586                 /* The default location for the bitmap is 1K after the
1587                  * superblock, using 3K - a total of 4K.
1588                  */
1589                 mddev->bitmap_info.default_offset = 1024 >> 9;
1590                 mddev->bitmap_info.default_space = (4096-1024) >> 9;
1591                 mddev->reshape_backwards = 0;
1592
1593                 mddev->recovery_cp = le64_to_cpu(sb->resync_offset);
1594                 memcpy(mddev->uuid, sb->set_uuid, 16);
1595
1596                 mddev->max_disks =  (4096-256)/2;
1597
1598                 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BITMAP_OFFSET) &&
1599                     mddev->bitmap_info.file == NULL) {
1600                         mddev->bitmap_info.offset =
1601                                 (__s32)le32_to_cpu(sb->bitmap_offset);
1602                         /* Metadata doesn't record how much space is available.
1603                          * For 1.0, we assume we can use up to the superblock
1604                          * if the bitmap is before it, else up to 4K beyond it.
1605                          * For others, assume no change is possible.
1606                          */
1607                         if (mddev->minor_version > 0)
1608                                 mddev->bitmap_info.space = 0;
1609                         else if (mddev->bitmap_info.offset > 0)
1610                                 mddev->bitmap_info.space =
1611                                         8 - mddev->bitmap_info.offset;
1612                         else
1613                                 mddev->bitmap_info.space =
1614                                         -mddev->bitmap_info.offset;
1615                 }
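                     /*
                      * Worked example: a 1.0 array (minor_version 0) with
                      * bitmap_offset = 2 gets space = 8 - 2 = 6 sectors,
                      * i.e. the 3K between the bitmap and the point 4K
                      * beyond the superblock.
                      */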
1616
1617                 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE)) {
1618                         mddev->reshape_position = le64_to_cpu(sb->reshape_position);
1619                         mddev->delta_disks = le32_to_cpu(sb->delta_disks);
1620                         mddev->new_level = le32_to_cpu(sb->new_level);
1621                         mddev->new_layout = le32_to_cpu(sb->new_layout);
1622                         mddev->new_chunk_sectors = le32_to_cpu(sb->new_chunk);
1623                         if (mddev->delta_disks < 0 ||
1624                             (mddev->delta_disks == 0 &&
1625                              (le32_to_cpu(sb->feature_map)
1626                               & MD_FEATURE_RESHAPE_BACKWARDS)))
1627                                 mddev->reshape_backwards = 1;
1628                 } else {
1629                         mddev->reshape_position = MaxSector;
1630                         mddev->delta_disks = 0;
1631                         mddev->new_level = mddev->level;
1632                         mddev->new_layout = mddev->layout;
1633                         mddev->new_chunk_sectors = mddev->chunk_sectors;
1634                 }
1635
1636         } else if (mddev->pers == NULL) {
1637                 /* Insist on a good event counter while assembling, except for
1638                  * spares (which don't need an event count). */
1639                 ++ev1;
1640                 if (rdev->desc_nr >= 0 &&
1641                     rdev->desc_nr < le32_to_cpu(sb->max_dev) &&
1642                     le16_to_cpu(sb->dev_roles[rdev->desc_nr]) < 0xfffe)
1643                         if (ev1 < mddev->events)
1644                                 return -EINVAL;
1645         } else if (mddev->bitmap) {
1646                 /* If adding to an array with a bitmap, then we can accept an
1647                  * older device, but not too old.
1648                  */
1649                 if (ev1 < mddev->bitmap->events_cleared)
1650                         return 0;
1651                 if (ev1 < mddev->events)
1652                         set_bit(Bitmap_sync, &rdev->flags);
1653         } else {
1654                 if (ev1 < mddev->events)
1655                         /* just a hot-add of a new device, leave raid_disk at -1 */
1656                         return 0;
1657         }
1658         if (mddev->level != LEVEL_MULTIPATH) {
1659                 int role;
1660                 if (rdev->desc_nr < 0 ||
1661                     rdev->desc_nr >= le32_to_cpu(sb->max_dev)) {
1662                         role = 0xffff;
1663                         rdev->desc_nr = -1;
1664                 } else
1665                         role = le16_to_cpu(sb->dev_roles[rdev->desc_nr]);
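                     /*
                      * dev_roles[] reserves two values: 0xffff marks a spare
                      * and 0xfffe a faulty device; any smaller value is the
                      * slot (raid_disk) that the device occupies.
                      */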
1666                 switch (role) {
1667                 case 0xffff: /* spare */
1668                         break;
1669                 case 0xfffe: /* faulty */
1670                         set_bit(Faulty, &rdev->flags);
1671                         break;
1672                 default:
1673                         rdev->saved_raid_disk = role;
1674                         if ((le32_to_cpu(sb->feature_map) &
1675                              MD_FEATURE_RECOVERY_OFFSET)) {
1676                                 rdev->recovery_offset = le64_to_cpu(sb->recovery_offset);
1677                                 if (!(le32_to_cpu(sb->feature_map) &
1678                                       MD_FEATURE_RECOVERY_BITMAP))
1679                                         rdev->saved_raid_disk = -1;
1680                         } else
1681                                 set_bit(In_sync, &rdev->flags);
1682                         rdev->raid_disk = role;
1683                         break;
1684                 }
1685                 if (sb->devflags & WriteMostly1)
1686                         set_bit(WriteMostly, &rdev->flags);
1687                 if (le32_to_cpu(sb->feature_map) & MD_FEATURE_REPLACEMENT)
1688                         set_bit(Replacement, &rdev->flags);
1689         } else /* MULTIPATH are always insync */
1690                 set_bit(In_sync, &rdev->flags);
1691
1692         return 0;
1693 }
1694
1695 static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev)
1696 {
1697         struct mdp_superblock_1 *sb;
1698         struct md_rdev *rdev2;
1699         int max_dev, i;
1700         /* make rdev->sb match mddev and rdev data. */
1701
1702         sb = page_address(rdev->sb_page);
1703
1704         sb->feature_map = 0;
1705         sb->pad0 = 0;
1706         sb->recovery_offset = cpu_to_le64(0);
1707         memset(sb->pad3, 0, sizeof(sb->pad3));
1708
1709         sb->utime = cpu_to_le64((__u64)mddev->utime);
1710         sb->events = cpu_to_le64(mddev->events);
1711         if (mddev->in_sync)
1712                 sb->resync_offset = cpu_to_le64(mddev->recovery_cp);
1713         else
1714                 sb->resync_offset = cpu_to_le64(0);
1715
1716         sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
1717
1718         sb->raid_disks = cpu_to_le32(mddev->raid_disks);
1719         sb->size = cpu_to_le64(mddev->dev_sectors);
1720         sb->chunksize = cpu_to_le32(mddev->chunk_sectors);
1721         sb->level = cpu_to_le32(mddev->level);
1722         sb->layout = cpu_to_le32(mddev->layout);
1723
1724         if (test_bit(WriteMostly, &rdev->flags))
1725                 sb->devflags |= WriteMostly1;
1726         else
1727                 sb->devflags &= ~WriteMostly1;
1728         sb->data_offset = cpu_to_le64(rdev->data_offset);
1729         sb->data_size = cpu_to_le64(rdev->sectors);
1730
1731         if (mddev->bitmap && mddev->bitmap_info.file == NULL) {
1732                 sb->bitmap_offset = cpu_to_le32((__u32)mddev->bitmap_info.offset);
1733                 sb->feature_map = cpu_to_le32(MD_FEATURE_BITMAP_OFFSET);
1734         }
1735
1736         if (rdev->raid_disk >= 0 &&
1737             !test_bit(In_sync, &rdev->flags)) {
1738                 sb->feature_map |=
1739                         cpu_to_le32(MD_FEATURE_RECOVERY_OFFSET);
1740                 sb->recovery_offset =
1741                         cpu_to_le64(rdev->recovery_offset);
1742                 if (rdev->saved_raid_disk >= 0 && mddev->bitmap)
1743                         sb->feature_map |=
1744                                 cpu_to_le32(MD_FEATURE_RECOVERY_BITMAP);
1745         }
1746         if (test_bit(Replacement, &rdev->flags))
1747                 sb->feature_map |=
1748                         cpu_to_le32(MD_FEATURE_REPLACEMENT);
1749
1750         if (mddev->reshape_position != MaxSector) {
1751                 sb->feature_map |= cpu_to_le32(MD_FEATURE_RESHAPE_ACTIVE);
1752                 sb->reshape_position = cpu_to_le64(mddev->reshape_position);
1753                 sb->new_layout = cpu_to_le32(mddev->new_layout);
1754                 sb->delta_disks = cpu_to_le32(mddev->delta_disks);
1755                 sb->new_level = cpu_to_le32(mddev->new_level);
1756                 sb->new_chunk = cpu_to_le32(mddev->new_chunk_sectors);
1757                 if (mddev->delta_disks == 0 &&
1758                     mddev->reshape_backwards)
1759                         sb->feature_map
1760                                 |= cpu_to_le32(MD_FEATURE_RESHAPE_BACKWARDS);
1761                 if (rdev->new_data_offset != rdev->data_offset) {
1762                         sb->feature_map
1763                                 |= cpu_to_le32(MD_FEATURE_NEW_OFFSET);
1764                         sb->new_offset = cpu_to_le32((__u32)(rdev->new_data_offset
1765                                                              - rdev->data_offset));
1766                 }
1767         }
1768
1769         if (rdev->badblocks.count == 0)
1770                 /* Nothing to do for bad blocks */ ;
1771         else if (sb->bblog_offset == 0)
1772                 /* Cannot record bad blocks on this device */
1773                 md_error(mddev, rdev);
1774         else {
1775                 struct badblocks *bb = &rdev->badblocks;
1776                 u64 *bbp = (u64 *)page_address(rdev->bb_page);
1777                 u64 *p = bb->page;
1778                 sb->feature_map |= cpu_to_le32(MD_FEATURE_BAD_BLOCKS);
1779                 if (bb->changed) {
1780                         unsigned seq;
1781
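                             /*
                              * Seqlock read pattern: snapshot the sequence
                              * count, copy the list out, and redo the copy
                              * if read_seqretry() reports a concurrent
                              * writer.
                              */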
1782 retry:
1783                         seq = read_seqbegin(&bb->lock);
1784
1785                         memset(bbp, 0xff, PAGE_SIZE);
1786
1787                         for (i = 0 ; i < bb->count ; i++) {
1788                                 u64 internal_bb = p[i];
1789                                 u64 store_bb = ((BB_OFFSET(internal_bb) << 10)
1790                                                 | BB_LEN(internal_bb));
1791                                 bbp[i] = cpu_to_le64(store_bb);
1792                         }
1793                         bb->changed = 0;
1794                         if (read_seqretry(&bb->lock, seq))
1795                                 goto retry;
1796
1797                         bb->sector = (rdev->sb_start +
1798                                       (int)le32_to_cpu(sb->bblog_offset));
1799                         bb->size = le16_to_cpu(sb->bblog_size);
1800                 }
1801         }
1802
1803         max_dev = 0;
1804         rdev_for_each(rdev2, mddev)
1805                 if (rdev2->desc_nr+1 > max_dev)
1806                         max_dev = rdev2->desc_nr+1;
1807
1808         if (max_dev > le32_to_cpu(sb->max_dev)) {
1809                 int bmask;
1810                 sb->max_dev = cpu_to_le32(max_dev);
1811                 rdev->sb_size = max_dev * 2 + 256;
1812                 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
1813                 if (rdev->sb_size & bmask)
1814                         rdev->sb_size = (rdev->sb_size | bmask) + 1;
1815         } else
1816                 max_dev = le32_to_cpu(sb->max_dev);
1817
1818         for (i = 0; i < max_dev; i++)
1819                 sb->dev_roles[i] = cpu_to_le16(0xfffe);
1820
1821         rdev_for_each(rdev2, mddev) {
1822                 i = rdev2->desc_nr;
1823                 if (test_bit(Faulty, &rdev2->flags))
1824                         sb->dev_roles[i] = cpu_to_le16(0xfffe);
1825                 else if (test_bit(In_sync, &rdev2->flags))
1826                         sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk);
1827                 else if (rdev2->raid_disk >= 0)
1828                         sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk);
1829                 else
1830                         sb->dev_roles[i] = cpu_to_le16(0xffff);
1831         }
1832
1833         sb->sb_csum = calc_sb_1_csum(sb);
1834 }
1835
1836 static unsigned long long
1837 super_1_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors)
1838 {
1839         struct mdp_superblock_1 *sb;
1840         sector_t max_sectors;
1841         if (num_sectors && num_sectors < rdev->mddev->dev_sectors)
1842                 return 0; /* component must fit device */
1843         if (rdev->data_offset != rdev->new_data_offset)
1844                 return 0; /* too confusing */
1845         if (rdev->sb_start < rdev->data_offset) {
1846                 /* minor versions 1 and 2; superblock before data */
1847                 max_sectors = i_size_read(rdev->bdev->bd_inode) >> 9;
1848                 max_sectors -= rdev->data_offset;
1849                 if (!num_sectors || num_sectors > max_sectors)
1850                         num_sectors = max_sectors;
1851         } else if (rdev->mddev->bitmap_info.offset) {
1852                 /* minor version 0 with bitmap we can't move */
1853                 return 0;
1854         } else {
1855                 /* minor version 0; superblock after data */
1856                 sector_t sb_start;
1857                 sb_start = (i_size_read(rdev->bdev->bd_inode) >> 9) - 8*2;
1858                 sb_start &= ~(sector_t)(4*2 - 1);
1859                 max_sectors = rdev->sectors + sb_start - rdev->sb_start;
1860                 if (!num_sectors || num_sectors > max_sectors)
1861                         num_sectors = max_sectors;
1862                 rdev->sb_start = sb_start;
1863         }
1864         sb = page_address(rdev->sb_page);
1865         sb->data_size = cpu_to_le64(num_sectors);
1866         sb->super_offset = cpu_to_le64(rdev->sb_start);
1867         sb->sb_csum = calc_sb_1_csum(sb);
1868         md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
1869                        rdev->sb_page);
1870         md_super_wait(rdev->mddev);
1871         return num_sectors;
1872 }
1874
1875 static int
1876 super_1_allow_new_offset(struct md_rdev *rdev,
1877                          unsigned long long new_offset)
1878 {
1879         /* All necessary checks on new >= old have been done */
1880         struct bitmap *bitmap;
1881         if (new_offset >= rdev->data_offset)
1882                 return 1;
1883
1884         /* with 1.0 metadata, there is no metadata before the data,
1885          * so we can always move back */
1886         if (rdev->mddev->minor_version == 0)
1887                 return 1;
1888
1889         /* otherwise we must be sure not to step on
1890          * any metadata, so stay:
1891          * 36K beyond start of superblock
1892          * beyond end of badblocks
1893          * beyond write-intent bitmap
1894          */
1895         if (rdev->sb_start + (32+4)*2 > new_offset)
1896                 return 0;
1897         bitmap = rdev->mddev->bitmap;
1898         if (bitmap && !rdev->mddev->bitmap_info.file &&
1899             rdev->sb_start + rdev->mddev->bitmap_info.offset +
1900             bitmap->storage.file_pages * (PAGE_SIZE>>9) > new_offset)
1901                 return 0;
1902         if (rdev->badblocks.sector + rdev->badblocks.size > new_offset)
1903                 return 0;
1904
1905         return 1;
1906 }
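     /*
      * Illustrative example: with 1.2 metadata (sb_start = 8), the check
      * above requires new_offset >= 8 + 72 = 80 sectors, keeping the data
      * at least 36K clear of the superblock before the bitmap and bad-block
      * limits are applied.
      */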
1907
1908 static struct super_type super_types[] = {
1909         [0] = {
1910                 .name   = "0.90.0",
1911                 .owner  = THIS_MODULE,
1912                 .load_super         = super_90_load,
1913                 .validate_super     = super_90_validate,
1914                 .sync_super         = super_90_sync,
1915                 .rdev_size_change   = super_90_rdev_size_change,
1916                 .allow_new_offset   = super_90_allow_new_offset,
1917         },
1918         [1] = {
1919                 .name   = "md-1",
1920                 .owner  = THIS_MODULE,
1921                 .load_super         = super_1_load,
1922                 .validate_super     = super_1_validate,
1923                 .sync_super         = super_1_sync,
1924                 .rdev_size_change   = super_1_rdev_size_change,
1925                 .allow_new_offset   = super_1_allow_new_offset,
1926         },
1927 };
1928
1929 static void sync_super(struct mddev *mddev, struct md_rdev *rdev)
1930 {
1931         if (mddev->sync_super) {
1932                 mddev->sync_super(mddev, rdev);
1933                 return;
1934         }
1935
1936         BUG_ON(mddev->major_version >= ARRAY_SIZE(super_types));
1937
1938         super_types[mddev->major_version].sync_super(mddev, rdev);
1939 }
1940
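     /*
      * Returns 1 if any component device of mddev1 sits on the same
      * underlying whole disk (bd_contains) as any component device of
      * mddev2, i.e. the two arrays would share a physical device.
      */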
1941 static int match_mddev_units(struct mddev *mddev1, struct mddev *mddev2)
1942 {
1943         struct md_rdev *rdev, *rdev2;
1944
1945         rcu_read_lock();
1946         rdev_for_each_rcu(rdev, mddev1)
1947                 rdev_for_each_rcu(rdev2, mddev2)
1948                         if (rdev->bdev->bd_contains ==
1949                             rdev2->bdev->bd_contains) {
1950                                 rcu_read_unlock();
1951                                 return 1;
1952                         }
1953         rcu_read_unlock();
1954         return 0;
1955 }
1956
1957 static LIST_HEAD(pending_raid_disks);
1958
1959 /*
1960  * Try to register data integrity profile for an mddev
1961  *
1962  * This is called when an array is started and after a disk has been kicked
1963  * from the array. It only succeeds if all working and active component devices
1964  * are integrity capable with matching profiles.
1965  */
1966 int md_integrity_register(struct mddev *mddev)
1967 {
1968         struct md_rdev *rdev, *reference = NULL;
1969
1970         if (list_empty(&mddev->disks))
1971                 return 0; /* nothing to do */
1972         if (!mddev->gendisk || blk_get_integrity(mddev->gendisk))
1973                 return 0; /* shouldn't register, or already is */
1974         rdev_for_each(rdev, mddev) {
1975                 /* skip spares and non-functional disks */
1976                 if (test_bit(Faulty, &rdev->flags))
1977                         continue;
1978                 if (rdev->raid_disk < 0)
1979                         continue;
1980                 if (!reference) {
1981                         /* Use the first rdev as the reference */
1982                         reference = rdev;
1983                         continue;
1984                 }
1985                 /* does this rdev's profile match the reference profile? */
1986                 if (blk_integrity_compare(reference->bdev->bd_disk,
1987                                 rdev->bdev->bd_disk) < 0)
1988                         return -EINVAL;
1989         }
1990         if (!reference || !bdev_get_integrity(reference->bdev))
1991                 return 0;
1992         /*
1993          * All component devices are integrity capable and have matching
1994          * profiles, register the common profile for the md device.
1995          */
1996         if (blk_integrity_register(mddev->gendisk,
1997                         bdev_get_integrity(reference->bdev)) != 0) {
1998                 printk(KERN_ERR "md: failed to register integrity for %s\n",
1999                         mdname(mddev));
2000                 return -EINVAL;
2001         }
2002         printk(KERN_NOTICE "md: data integrity enabled on %s\n", mdname(mddev));
2003         if (bioset_integrity_create(mddev->bio_set, BIO_POOL_SIZE)) {
2004                 printk(KERN_ERR "md: failed to create integrity pool for %s\n",
2005                        mdname(mddev));
2006                 return -EINVAL;
2007         }
2008         return 0;
2009 }
2010 EXPORT_SYMBOL(md_integrity_register);
2011
2012 /* Disable data integrity if non-capable/non-matching disk is being added */
2013 void md_integrity_add_rdev(struct md_rdev *rdev, struct mddev *mddev)
2014 {
2015         struct blk_integrity *bi_rdev;
2016         struct blk_integrity *bi_mddev;
2017
2018         if (!mddev->gendisk)
2019                 return;
2020
2021         bi_rdev = bdev_get_integrity(rdev->bdev);
2022         bi_mddev = blk_get_integrity(mddev->gendisk);
2023
2024         if (!bi_mddev) /* nothing to do */
2025                 return;
2026         if (rdev->raid_disk < 0) /* skip spares */
2027                 return;
2028         if (bi_rdev && blk_integrity_compare(mddev->gendisk,
2029                                              rdev->bdev->bd_disk) >= 0)
2030                 return;
2031         printk(KERN_NOTICE "disabling data integrity on %s\n", mdname(mddev));
2032         blk_integrity_unregister(mddev->gendisk);
2033 }
2034 EXPORT_SYMBOL(md_integrity_add_rdev);
2035
2036 static int bind_rdev_to_array(struct md_rdev *rdev, struct mddev *mddev)
2037 {
2038         char b[BDEVNAME_SIZE];
2039         struct kobject *ko;
2040         char *s;
2041         int err;
2042
2043         if (rdev->mddev) {
2044                 MD_BUG();
2045                 return -EINVAL;
2046         }
2047
2048         /* prevent duplicates */
2049         if (find_rdev(mddev, rdev->bdev->bd_dev))
2050                 return -EEXIST;
2051
2052         /* make sure rdev->sectors exceeds mddev->dev_sectors */
2053         if (rdev->sectors && (mddev->dev_sectors == 0 ||
2054                         rdev->sectors < mddev->dev_sectors)) {
2055                 if (mddev->pers) {
2056                         /* Cannot change size, so fail
2057                          * If mddev->level <= 0, then we don't care
2058                          * about aligning sizes (e.g. linear)
2059                          */
2060                         if (mddev->level > 0)
2061                                 return -ENOSPC;
2062                 } else
2063                         mddev->dev_sectors = rdev->sectors;
2064         }
2065
2066         /* Verify rdev->desc_nr is unique.
2067          * If it is -1, assign a free number, else
2068          * check number is not in use
2069          */
2070         if (rdev->desc_nr < 0) {
2071                 int choice = 0;
2072                 if (mddev->pers) choice = mddev->raid_disks;
2073                 while (find_rdev_nr(mddev, choice))
2074                         choice++;
2075                 rdev->desc_nr = choice;
2076         } else {
2077                 if (find_rdev_nr(mddev, rdev->desc_nr))
2078                         return -EBUSY;
2079         }
2080         if (mddev->max_disks && rdev->desc_nr >= mddev->max_disks) {
2081                 printk(KERN_WARNING "md: %s: array is limited to %d devices\n",
2082                        mdname(mddev), mddev->max_disks);
2083                 return -EBUSY;
2084         }
2085         bdevname(rdev->bdev,b);
2086         while ((s = strchr(b, '/')) != NULL)
2087                 *s = '!';
2088
2089         rdev->mddev = mddev;
2090         printk(KERN_INFO "md: bind<%s>\n", b);
2091
2092         if ((err = kobject_add(&rdev->kobj, &mddev->kobj, "dev-%s", b)))
2093                 goto fail;
2094
2095         ko = &part_to_dev(rdev->bdev->bd_part)->kobj;
2096         if (sysfs_create_link(&rdev->kobj, ko, "block"))
2097                 /* failure here is OK */;
2098         rdev->sysfs_state = sysfs_get_dirent_safe(rdev->kobj.sd, "state");
2099
2100         list_add_rcu(&rdev->same_set, &mddev->disks);
2101         bd_link_disk_holder(rdev->bdev, mddev->gendisk);
2102
2103         /* May as well allow recovery to be retried once */
2104         mddev->recovery_disabled++;
2105
2106         return 0;
2107
2108  fail:
2109         printk(KERN_WARNING "md: failed to register dev-%s for %s\n",
2110                b, mdname(mddev));
2111         return err;
2112 }
2113
2114 static void md_delayed_delete(struct work_struct *ws)
2115 {
2116         struct md_rdev *rdev = container_of(ws, struct md_rdev, del_work);
2117         kobject_del(&rdev->kobj);
2118         kobject_put(&rdev->kobj);
2119 }
2120
2121 static void unbind_rdev_from_array(struct md_rdev *rdev)
2122 {
2123         char b[BDEVNAME_SIZE];
2124         if (!rdev->mddev) {
2125                 MD_BUG();
2126                 return;
2127         }
2128         bd_unlink_disk_holder(rdev->bdev, rdev->mddev->gendisk);
2129         list_del_rcu(&rdev->same_set);
2130         printk(KERN_INFO "md: unbind<%s>\n", bdevname(rdev->bdev,b));
2131         rdev->mddev = NULL;
2132         sysfs_remove_link(&rdev->kobj, "block");
2133         sysfs_put(rdev->sysfs_state);
2134         rdev->sysfs_state = NULL;
2135         rdev->badblocks.count = 0;
2136         /* We need to delay this, otherwise we can deadlock when
2137          * writing 'remove' to the sysfs "dev/state" file.  We also
2138          * need to delay it due to rcu usage.
2139          */
2140         synchronize_rcu();
2141         INIT_WORK(&rdev->del_work, md_delayed_delete);
2142         kobject_get(&rdev->kobj);
2143         queue_work(md_misc_wq, &rdev->del_work);
2144 }
2145
2146 /*
2147  * prevent the device from being mounted, repartitioned or
2148  * otherwise reused by a RAID array (or any other kernel
2149  * subsystem), by bd_claiming the device.
2150  */
2151 static int lock_rdev(struct md_rdev *rdev, dev_t dev, int shared)
2152 {
2153         int err = 0;
2154         struct block_device *bdev;
2155         char b[BDEVNAME_SIZE];
2156
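             /*
              * For a shared claim, the address of lock_rdev itself serves as
              * a dummy holder cookie, so every rdev claiming a device in
              * shared mode presents the same holder to blkdev_get_by_dev().
              */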
2157         bdev = blkdev_get_by_dev(dev, FMODE_READ|FMODE_WRITE|FMODE_EXCL,
2158                                  shared ? (struct md_rdev *)lock_rdev : rdev);
2159         if (IS_ERR(bdev)) {
2160                 printk(KERN_ERR "md: could not open %s.\n",
2161                         __bdevname(dev, b));
2162                 return PTR_ERR(bdev);
2163         }
2164         rdev->bdev = bdev;
2165         return err;
2166 }
2167
2168 static void unlock_rdev(struct md_rdev *rdev)
2169 {
2170         struct block_device *bdev = rdev->bdev;
2171         rdev->bdev = NULL;
2172         if (!bdev)
2173                 MD_BUG();
2174         blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
2175 }
2176
2177 void md_autodetect_dev(dev_t dev);
2178
2179 static void export_rdev(struct md_rdev *rdev)
2180 {
2181         char b[BDEVNAME_SIZE];
2182         printk(KERN_INFO "md: export_rdev(%s)\n",
2183                 bdevname(rdev->bdev,b));
2184         if (rdev->mddev)
2185                 MD_BUG();
2186         md_rdev_clear(rdev);
2187 #ifndef MODULE
2188         if (test_bit(AutoDetected, &rdev->flags))
2189                 md_autodetect_dev(rdev->bdev->bd_dev);
2190 #endif
2191         unlock_rdev(rdev);
2192         kobject_put(&rdev->kobj);
2193 }
2194
2195 static void kick_rdev_from_array(struct md_rdev *rdev)
2196 {
2197         unbind_rdev_from_array(rdev);
2198         export_rdev(rdev);
2199 }
2200
2201 static void export_array(struct mddev *mddev)
2202 {
2203         struct md_rdev *rdev, *tmp;
2204
2205         rdev_for_each_safe(rdev, tmp, mddev) {
2206                 if (!rdev->mddev) {
2207                         MD_BUG();
2208                         continue;
2209                 }
2210                 kick_rdev_from_array(rdev);
2211         }
2212         if (!list_empty(&mddev->disks))
2213                 MD_BUG();
2214         mddev->raid_disks = 0;
2215         mddev->major_version = 0;
2216 }
2217
2218 static void print_desc(mdp_disk_t *desc)
2219 {
2220         printk(" DISK<N:%d,(%d,%d),R:%d,S:%d>\n", desc->number,
2221                 desc->major,desc->minor,desc->raid_disk,desc->state);
2222 }
2223
2224 static void print_sb_90(mdp_super_t *sb)
2225 {
2226         int i;
2227
2228         printk(KERN_INFO
2229                 "md:  SB: (V:%d.%d.%d) ID:<%08x.%08x.%08x.%08x> CT:%08x\n",
2230                 sb->major_version, sb->minor_version, sb->patch_version,
2231                 sb->set_uuid0, sb->set_uuid1, sb->set_uuid2, sb->set_uuid3,
2232                 sb->ctime);
2233         printk(KERN_INFO "md:     L%d S%08d ND:%d RD:%d md%d LO:%d CS:%d\n",
2234                 sb->level, sb->size, sb->nr_disks, sb->raid_disks,
2235                 sb->md_minor, sb->layout, sb->chunk_size);
2236         printk(KERN_INFO "md:     UT:%08x ST:%d AD:%d WD:%d"
2237                 " FD:%d SD:%d CSUM:%08x E:%08lx\n",
2238                 sb->utime, sb->state, sb->active_disks, sb->working_disks,
2239                 sb->failed_disks, sb->spare_disks,
2240                 sb->sb_csum, (unsigned long)sb->events_lo);
2241
2242         printk(KERN_INFO);
2243         for (i = 0; i < MD_SB_DISKS; i++) {
2244                 mdp_disk_t *desc;
2245
2246                 desc = sb->disks + i;
2247                 if (desc->number || desc->major || desc->minor ||
2248                     desc->raid_disk || (desc->state && (desc->state != 4))) {
2249                         printk("     D %2d: ", i);
2250                         print_desc(desc);
2251                 }
2252         }
2253         printk(KERN_INFO "md:     THIS: ");
2254         print_desc(&sb->this_disk);
2255 }
2256
2257 static void print_sb_1(struct mdp_superblock_1 *sb)
2258 {
2259         __u8 *uuid;
2260
2261         uuid = sb->set_uuid;
2262         printk(KERN_INFO
2263                "md:  SB: (V:%u) (F:0x%08x) Array-ID:<%pU>\n"
2264                "md:    Name: \"%s\" CT:%llu\n",
2265                 le32_to_cpu(sb->major_version),
2266                 le32_to_cpu(sb->feature_map),
2267                 uuid,
2268                 sb->set_name,
2269                 (unsigned long long)le64_to_cpu(sb->ctime)
2270                        & MD_SUPERBLOCK_1_TIME_SEC_MASK);
2271
2272         uuid = sb->device_uuid;
2273         printk(KERN_INFO
2274                "md:       L%u SZ%llu RD:%u LO:%u CS:%u DO:%llu DS:%llu SO:%llu"
2275                         " RO:%llu\n"
2276                "md:     Dev:%08x UUID: %pU\n"
2277                "md:       (F:0x%08x) UT:%llu Events:%llu ResyncOffset:%llu CSUM:0x%08x\n"
2278                "md:         (MaxDev:%u)\n",
2279                 le32_to_cpu(sb->level),
2280                 (unsigned long long)le64_to_cpu(sb->size),
2281                 le32_to_cpu(sb->raid_disks),
2282                 le32_to_cpu(sb->layout),
2283                 le32_to_cpu(sb->chunksize),
2284                 (unsigned long long)le64_to_cpu(sb->data_offset),
2285                 (unsigned long long)le64_to_cpu(sb->data_size),
2286                 (unsigned long long)le64_to_cpu(sb->super_offset),
2287                 (unsigned long long)le64_to_cpu(sb->recovery_offset),
2288                 le32_to_cpu(sb->dev_number),
2289                 uuid,
2290                 sb->devflags,
2291                 (unsigned long long)le64_to_cpu(sb->utime) & MD_SUPERBLOCK_1_TIME_SEC_MASK,
2292                 (unsigned long long)le64_to_cpu(sb->events),
2293                 (unsigned long long)le64_to_cpu(sb->resync_offset),
2294                 le32_to_cpu(sb->sb_csum),
2295                 le32_to_cpu(sb->max_dev)
2296                 );
2297 }
2298
2299 static void print_rdev(struct md_rdev *rdev, int major_version)
2300 {
2301         char b[BDEVNAME_SIZE];
2302         printk(KERN_INFO "md: rdev %s, Sect:%08llu F:%d S:%d DN:%u\n",
2303                 bdevname(rdev->bdev, b), (unsigned long long)rdev->sectors,
2304                 test_bit(Faulty, &rdev->flags), test_bit(In_sync, &rdev->flags),
2305                 rdev->desc_nr);
2306         if (rdev->sb_loaded) {
2307                 printk(KERN_INFO "md: rdev superblock (MJ:%d):\n", major_version);
2308                 switch (major_version) {
2309                 case 0:
2310                         print_sb_90(page_address(rdev->sb_page));
2311                         break;
2312                 case 1:
2313                         print_sb_1(page_address(rdev->sb_page));
2314                         break;
2315                 }
2316         } else
2317                 printk(KERN_INFO "md: no rdev superblock!\n");
2318 }
2319
2320 static void md_print_devices(void)
2321 {
2322         struct list_head *tmp;
2323         struct md_rdev *rdev;
2324         struct mddev *mddev;
2325         char b[BDEVNAME_SIZE];
2326
2327         printk("\n");
2328         printk("md:     **********************************\n");
2329         printk("md:     * <COMPLETE RAID STATE PRINTOUT> *\n");
2330         printk("md:     **********************************\n");
2331         for_each_mddev(mddev, tmp) {
2332
2333                 if (mddev->bitmap)
2334                         bitmap_print_sb(mddev->bitmap);
2335                 else
2336                         printk("%s: ", mdname(mddev));
2337                 rdev_for_each(rdev, mddev)
2338                         printk("<%s>", bdevname(rdev->bdev,b));
2339                 printk("\n");
2340
2341                 rdev_for_each(rdev, mddev)
2342                         print_rdev(rdev, mddev->major_version);
2343         }
2344         printk("md:     **********************************\n");
2345         printk("\n");
2346 }
2347
2349 static void sync_sbs(struct mddev *mddev, int nospares)
2350 {
2351         /* Update each superblock (in-memory image), but
2352          * if we are allowed to, skip spares which already
2353          * have the right event counter, or have one earlier
2354          * (which would mean they aren't being marked as dirty
2355          * with the rest of the array)
2356          */
2357         struct md_rdev *rdev;
2358         rdev_for_each(rdev, mddev) {
2359                 if (rdev->sb_events == mddev->events ||
2360                     (nospares &&
2361                      rdev->raid_disk < 0 &&
2362                      rdev->sb_events+1 == mddev->events)) {
2363                         /* Don't update this superblock */
2364                         rdev->sb_loaded = 2;
2365                 } else {
2366                         sync_super(mddev, rdev);
2367                         rdev->sb_loaded = 1;
2368                 }
2369         }
2370 }
2371
2372 static void md_update_sb(struct mddev *mddev, int force_change)
2373 {
2374         struct md_rdev *rdev;
2375         int sync_req;
2376         int nospares = 0;
2377         int any_badblocks_changed = 0;
2378
2379         if (mddev->ro) {
2380                 if (force_change)
2381                         set_bit(MD_CHANGE_DEVS, &mddev->flags);
2382                 return;
2383         }
2384 repeat:
2385         /* First make sure individual recovery_offsets are correct */
2386         rdev_for_each(rdev, mddev) {
2387                 if (rdev->raid_disk >= 0 &&
2388                     mddev->delta_disks >= 0 &&
2389                     !test_bit(In_sync, &rdev->flags) &&
2390                     mddev->curr_resync_completed > rdev->recovery_offset)
2391                                 rdev->recovery_offset = mddev->curr_resync_completed;
2392
2393         }
2394         if (!mddev->persistent) {
2395                 clear_bit(MD_CHANGE_CLEAN, &mddev->flags);
2396                 clear_bit(MD_CHANGE_DEVS, &mddev->flags);
2397                 if (!mddev->external) {
2398                         clear_bit(MD_CHANGE_PENDING, &mddev->flags);
2399                         rdev_for_each(rdev, mddev) {
2400                                 if (rdev->badblocks.changed) {
2401                                         rdev->badblocks.changed = 0;
2402                                         md_ack_all_badblocks(&rdev->badblocks);
2403                                         md_error(mddev, rdev);
2404                                 }
2405                                 clear_bit(Blocked, &rdev->flags);
2406                                 clear_bit(BlockedBadBlocks, &rdev->flags);
2407                                 wake_up(&rdev->blocked_wait);
2408                         }
2409                 }
2410                 wake_up(&mddev->sb_wait);
2411                 return;
2412         }
2413
2414         spin_lock_irq(&mddev->write_lock);
2415
2416         mddev->utime = get_seconds();
2417
2418         if (test_and_clear_bit(MD_CHANGE_DEVS, &mddev->flags))
2419                 force_change = 1;
2420         if (test_and_clear_bit(MD_CHANGE_CLEAN, &mddev->flags))
2421                 /* just a clean <-> dirty transition; possibly leave spares alone,
2422                  * though if 'events' isn't the right even/odd, we will have to
2423                  * update the spares after all
2424                  */
2425                 nospares = 1;
2426         if (force_change)
2427                 nospares = 0;
2428         if (mddev->degraded)
2429                 /* If the array is degraded, then skipping spares is both
2430                  * dangerous and fairly pointless.
2431                  * Dangerous because a device that was removed from the array
2432          * might have an event_count that still looks up-to-date,
2433                  * so it can be re-added without a resync.
2434                  * Pointless because if there are any spares to skip,
2435                  * then a recovery will happen and soon that array won't
2436                  * be degraded any more and the spare can go back to sleep then.
2437                  */
2438                 nospares = 0;
2439
2440         sync_req = mddev->in_sync;
2441
2442         /* If this is just a dirty<->clean transition, and the array is clean
2443          * and 'events' is odd, we can roll back to the previous clean state */
2444         if (nospares
2445             && (mddev->in_sync && mddev->recovery_cp == MaxSector)
2446             && mddev->can_decrease_events
2447             && mddev->events != 1) {
2448                 mddev->events--;
2449                 mddev->can_decrease_events = 0;
2450         } else {
2451                 /* otherwise we have to go forward and ... */
2452                 mddev->events++;
2453                 mddev->can_decrease_events = nospares;
2454         }
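             /*
              * Illustrative example: a spare whose superblock still records
              * event count N is skipped when a clean->dirty transition bumps
              * the array to N+1; rolling back to N on the matching
              * dirty->clean transition, instead of advancing to N+2, leaves
              * the untouched spare consistent with the array.
              */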
2455
2456         if (!mddev->events) {
2457                 /*
2458                  * oops, this 64-bit counter should never wrap.
2459                  * Either we are somewhere around 1 trillion A.D., assuming
2460                  * 1 reboot per second, or we have a bug:
2461                  */
2462                 MD_BUG();
2463                 mddev->events--;
2464         }
2465
2466         rdev_for_each(rdev, mddev) {
2467                 if (rdev->badblocks.changed)
2468                         any_badblocks_changed++;
2469                 if (test_bit(Faulty, &rdev->flags))
2470                         set_bit(FaultRecorded, &rdev->flags);
2471         }
2472
2473         sync_sbs(mddev, nospares);
2474         spin_unlock_irq(&mddev->write_lock);
2475
2476         pr_debug("md: updating %s RAID superblock on device (in sync %d)\n",
2477                  mdname(mddev), mddev->in_sync);
2478
2479         bitmap_update_sb(mddev->bitmap);
2480         rdev_for_each(rdev, mddev) {
2481                 char b[BDEVNAME_SIZE];
2482
2483                 if (rdev->sb_loaded != 1)
2484                         continue; /* no noise on spare devices */
2485
2486                 if (!test_bit(Faulty, &rdev->flags)) {
2487                         md_super_write(mddev,rdev,
2488                                        rdev->sb_start, rdev->sb_size,
2489                                        rdev->sb_page);
2490                         pr_debug("md: (write) %s's sb offset: %llu\n",
2491                                  bdevname(rdev->bdev, b),
2492                                  (unsigned long long)rdev->sb_start);
2493                         rdev->sb_events = mddev->events;
2494                         if (rdev->badblocks.size) {
2495                                 md_super_write(mddev, rdev,
2496                                                rdev->badblocks.sector,
2497                                                rdev->badblocks.size << 9,
2498                                                rdev->bb_page);
2499                                 rdev->badblocks.size = 0;
2500                         }
2501
2502                 } else
2503                         pr_debug("md: %s (skipping faulty)\n",
2504                                  bdevname(rdev->bdev, b));
2505
2506                 if (mddev->level == LEVEL_MULTIPATH)
2507                         /* only need to write one superblock... */
2508                         break;
2509         }
2510         md_super_wait(mddev);
2511         /* if there was a failure, MD_CHANGE_DEVS was set, and we re-write super */
2512
2513         spin_lock_irq(&mddev->write_lock);
2514         if (mddev->in_sync != sync_req ||
2515             test_bit(MD_CHANGE_DEVS, &mddev->flags)) {
2516                 /* have to write it out again */
2517                 spin_unlock_irq(&mddev->write_lock);
2518                 goto repeat;
2519         }
2520         clear_bit(MD_CHANGE_PENDING, &mddev->flags);
2521         spin_unlock_irq(&mddev->write_lock);
2522         wake_up(&mddev->sb_wait);
2523         if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
2524                 sysfs_notify(&mddev->kobj, NULL, "sync_completed");
2525
2526         rdev_for_each(rdev, mddev) {
2527                 if (test_and_clear_bit(FaultRecorded, &rdev->flags))
2528                         clear_bit(Blocked, &rdev->flags);
2529
2530                 if (any_badblocks_changed)
2531                         md_ack_all_badblocks(&rdev->badblocks);
2532                 clear_bit(BlockedBadBlocks, &rdev->flags);
2533                 wake_up(&rdev->blocked_wait);
2534         }
2535 }
2536
2537 /* words written to sysfs files may, or may not, be \n terminated.
2538  * We want to accept either case. For this we use cmd_match.
2539  */
2540 static int cmd_match(const char *cmd, const char *str)
2541 {
2542         /* See if cmd, written into a sysfs file, matches
2543          * str.  They must either be the same, or cmd can
2544          * have a trailing newline
2545          */
2546         while (*cmd && *str && *cmd == *str) {
2547                 cmd++;
2548                 str++;
2549         }
2550         if (*cmd == '\n')
2551                 cmd++;
2552         if (*str || *cmd)
2553                 return 0;
2554         return 1;
2555 }
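     /*
      * For illustration: cmd_match("faulty\n", "faulty") and
      * cmd_match("faulty", "faulty") both return 1, while
      * cmd_match("fault", "faulty") and cmd_match("faultyX", "faulty")
      * both return 0.
      */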
2556
2557 struct rdev_sysfs_entry {
2558         struct attribute attr;
2559         ssize_t (*show)(struct md_rdev *, char *);
2560         ssize_t (*store)(struct md_rdev *, const char *, size_t);
2561 };
2562
2563 static ssize_t
2564 state_show(struct md_rdev *rdev, char *page)
2565 {
2566         char *sep = "";
2567         size_t len = 0;
2568
2569         if (test_bit(Faulty, &rdev->flags) ||
2570             rdev->badblocks.unacked_exist) {
2571                 len += sprintf(page+len, "%sfaulty", sep);
2572                 sep = ",";
2573         }
2574         if (test_bit(In_sync, &rdev->flags)) {
2575                 len += sprintf(page+len, "%sin_sync", sep);
2576                 sep = ",";
2577         }
2578         if (test_bit(WriteMostly, &rdev->flags)) {
2579                 len += sprintf(page+len, "%swrite_mostly", sep);
2580                 sep = ",";
2581         }
2582         if (test_bit(Blocked, &rdev->flags) ||
2583             (rdev->badblocks.unacked_exist
2584              && !test_bit(Faulty, &rdev->flags))) {
2585                 len += sprintf(page+len, "%sblocked", sep);
2586                 sep = ",";
2587         }
2588         if (!test_bit(Faulty, &rdev->flags) &&
2589             !test_bit(In_sync, &rdev->flags)) {
2590                 len += sprintf(page+len, "%sspare", sep);
2591                 sep = ",";
2592         }
2593         if (test_bit(WriteErrorSeen, &rdev->flags)) {
2594                 len += sprintf(page+len, "%swrite_error", sep);
2595                 sep = ",";
2596         }
2597         if (test_bit(WantReplacement, &rdev->flags)) {
2598                 len += sprintf(page+len, "%swant_replacement", sep);
2599                 sep = ",";
2600         }
2601         if (test_bit(Replacement, &rdev->flags)) {
2602                 len += sprintf(page+len, "%sreplacement", sep);
2603                 sep = ",";
2604         }
2605
2606         return len+sprintf(page+len, "\n");
2607 }
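     /*
      * Example output: a healthy active device reads "in_sync\n", while a
      * failed device that is also blocked reads "faulty,blocked\n"; flags
      * appear comma-separated in the order tested above.
      */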
2608
2609 static ssize_t
2610 state_store(struct md_rdev *rdev, const char *buf, size_t len)
2611 {
2612         /* can write
2613          *  faulty  - simulates an error
2614          *  remove  - disconnects the device
2615          *  writemostly - sets write_mostly
2616          *  -writemostly - clears write_mostly
2617          *  blocked - sets the Blocked flags
2618          *  -blocked - clears the Blocked and possibly simulates an error
2619          *  insync - sets Insync providing device isn't active
2620          *  -insync - clear Insync for a device with a slot assigned,
2621          *            so that it gets rebuilt based on bitmap
2622          *  write_error - sets WriteErrorSeen
2623          *  -write_error - clears WriteErrorSeen
2624          */
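             /*
              * These are typically written via sysfs; e.g., for a
              * hypothetical member sda1 of md0:
              *   echo faulty > /sys/block/md0/md/dev-sda1/state
              *   echo remove > /sys/block/md0/md/dev-sda1/state
              */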
2625         int err = -EINVAL;
2626         if (cmd_match(buf, "faulty") && rdev->mddev->pers) {
2627                 md_error(rdev->mddev, rdev);
2628                 if (test_bit(Faulty, &rdev->flags))
2629                         err = 0;
2630                 else
2631                         err = -EBUSY;
2632         } else if (cmd_match(buf, "remove")) {
2633                 if (rdev->raid_disk >= 0)
2634                         err = -EBUSY;
2635                 else {
2636                         struct mddev *mddev = rdev->mddev;
2637                         kick_rdev_from_array(rdev);
2638                         if (mddev->pers)
2639                                 md_update_sb(mddev, 1);
2640                         md_new_event(mddev);
2641                         err = 0;
2642                 }
2643         } else if (cmd_match(buf, "writemostly")) {
2644                 set_bit(WriteMostly, &rdev->flags);
2645                 err = 0;
2646         } else if (cmd_match(buf, "-writemostly")) {
2647                 clear_bit(WriteMostly, &rdev->flags);
2648                 err = 0;
2649         } else if (cmd_match(buf, "blocked")) {
2650                 set_bit(Blocked, &rdev->flags);
2651                 err = 0;
2652         } else if (cmd_match(buf, "-blocked")) {
2653                 if (!test_bit(Faulty, &rdev->flags) &&
2654                     rdev->badblocks.unacked_exist) {
2655                         /* metadata handler doesn't understand badblocks,
2656                          * so we need to fail the device
2657                          */
2658                         md_error(rdev->mddev, rdev);
2659                 }
2660                 clear_bit(Blocked, &rdev->flags);
2661                 clear_bit(BlockedBadBlocks, &rdev->flags);
2662                 wake_up(&rdev->blocked_wait);
2663                 set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
2664                 md_wakeup_thread(rdev->mddev->thread);
2665
2666                 err = 0;
2667         } else if (cmd_match(buf, "insync") && rdev->raid_disk == -1) {
2668                 set_bit(In_sync, &rdev->flags);
2669                 err = 0;
2670         } else if (cmd_match(buf, "-insync") && rdev->raid_disk >= 0) {
2671                 clear_bit(In_sync, &rdev->flags);
2672                 rdev->saved_raid_disk = rdev->raid_disk;
2673                 rdev->raid_disk = -1;
2674                 err = 0;
2675         } else if (cmd_match(buf, "write_error")) {
2676                 set_bit(WriteErrorSeen, &rdev->flags);
2677                 err = 0;
2678         } else if (cmd_match(buf, "-write_error")) {
2679                 clear_bit(WriteErrorSeen, &rdev->flags);
2680                 err = 0;
2681         } else if (cmd_match(buf, "want_replacement")) {
2682                 /* Any non-spare device that is not a replacement can
2683                  * become want_replacement at any time, but we then need to
2684                  * check if recovery is needed.
2685                  */
2686                 if (rdev->raid_disk >= 0 &&
2687                     !test_bit(Replacement, &rdev->flags))
2688                         set_bit(WantReplacement, &rdev->flags);
2689                 set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
2690                 md_wakeup_thread(rdev->mddev->thread);
2691                 err = 0;
2692         } else if (cmd_match(buf, "-want_replacement")) {
2693                 /* Clearing 'want_replacement' is always allowed.
2694                  * Once replacement starts it is too late, though.
2695                  */
2696                 err = 0;
2697                 clear_bit(WantReplacement, &rdev->flags);
2698         } else if (cmd_match(buf, "replacement")) {
2699                 /* Can only set a device as a replacement when array has not
2700                  * yet been started.  Once running, replacement is automatic
2701                  * from spares, or by assigning 'slot'.
2702                  */
2703                 if (rdev->mddev->pers)
2704                         err = -EBUSY;
2705                 else {
2706                         set_bit(Replacement, &rdev->flags);
2707                         err = 0;
2708                 }
2709         } else if (cmd_match(buf, "-replacement")) {
2710                 /* Similarly, can only clear Replacement before start */
2711                 if (rdev->mddev->pers)
2712                         err = -EBUSY;
2713                 else {
2714                         clear_bit(Replacement, &rdev->flags);
2715                         err = 0;
2716                 }
2717         }
2718         if (!err)
2719                 sysfs_notify_dirent_safe(rdev->sysfs_state);
2720         return err ? err : len;
2721 }
2722 static struct rdev_sysfs_entry rdev_state =
2723 __ATTR(state, S_IRUGO|S_IWUSR, state_show, state_store);
2724
2725 static ssize_t
2726 errors_show(struct md_rdev *rdev, char *page)
2727 {
2728         return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
2729 }
2730
2731 static ssize_t
2732 errors_store(struct md_rdev *rdev, const char *buf, size_t len)
2733 {
2734         char *e;
2735         unsigned long n = simple_strtoul(buf, &e, 10);
2736         if (*buf && (*e == 0 || *e == '\n')) {
2737                 atomic_set(&rdev->corrected_errors, n);
2738                 return len;
2739         }
2740         return -EINVAL;
2741 }
2742 static struct rdev_sysfs_entry rdev_errors =
2743 __ATTR(errors, S_IRUGO|S_IWUSR, errors_show, errors_store);
2744
2745 static ssize_t
2746 slot_show(struct md_rdev *rdev, char *page)
2747 {
2748         if (rdev->raid_disk < 0)
2749                 return sprintf(page, "none\n");
2750         else
2751                 return sprintf(page, "%d\n", rdev->raid_disk);
2752 }
2753
2754 static ssize_t
2755 slot_store(struct md_rdev *rdev, const char *buf, size_t len)
2756 {
2757         char *e;
2758         int err;
2759         int slot = simple_strtoul(buf, &e, 10);
2760         if (strncmp(buf, "none", 4)==0)
2761                 slot = -1;
2762         else if (e==buf || (*e && *e != '\n'))
2763                 return -EINVAL;
2764         if (rdev->mddev->pers && slot == -1) {
2765                 /* Setting 'slot' on an active array also requires
2766                  * updating the 'rd%d' link, and communicating
2767                  * with the personality via ->hot_*_disk.
2768                  * For now we only support removing
2769                  * failed/spare devices.  This normally happens automatically,
2770                  * but not when the metadata is externally managed.
2771                  */
2772                 if (rdev->raid_disk == -1)
2773                         return -EEXIST;
2774                 /* personality does all needed checks */
2775                 if (rdev->mddev->pers->hot_remove_disk == NULL)
2776                         return -EINVAL;
2777                 clear_bit(Blocked, &rdev->flags);
2778                 remove_and_add_spares(rdev->mddev, rdev);
2779                 if (rdev->raid_disk >= 0)
2780                         return -EBUSY;
2781                 set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
2782                 md_wakeup_thread(rdev->mddev->thread);
2783         } else if (rdev->mddev->pers) {
2784                 /* Activating a spare, or possibly reactivating one
2785                  * if we ever get bitmaps working here.
2786                  */
2787
2788                 if (rdev->raid_disk != -1)
2789                         return -EBUSY;
2790
2791                 if (test_bit(MD_RECOVERY_RUNNING, &rdev->mddev->recovery))
2792                         return -EBUSY;
2793
2794                 if (rdev->mddev->pers->hot_add_disk == NULL)
2795                         return -EINVAL;
2796
2797                 if (slot >= rdev->mddev->raid_disks &&
2798                     slot >= rdev->mddev->raid_disks + rdev->mddev->delta_disks)
2799                         return -ENOSPC;
2800
2801                 rdev->raid_disk = slot;
2802                 if (test_bit(In_sync, &rdev->flags))
2803                         rdev->saved_raid_disk = slot;
2804                 else
2805                         rdev->saved_raid_disk = -1;
2806                 clear_bit(In_sync, &rdev->flags);
2807                 clear_bit(Bitmap_sync, &rdev->flags);
2808                 err = rdev->mddev->pers->
2809                         hot_add_disk(rdev->mddev, rdev);
2810                 if (err) {
2811                         rdev->raid_disk = -1;
2812                         return err;
2813                 } else
2814                         sysfs_notify_dirent_safe(rdev->sysfs_state);
2815                 if (sysfs_link_rdev(rdev->mddev, rdev))
2816                         /* failure here is OK */;
2817                 /* don't wakeup anyone, leave that to userspace. */
2818         } else {
2819                 if (slot >= rdev->mddev->raid_disks &&
2820                     slot >= rdev->mddev->raid_disks + rdev->mddev->delta_disks)
2821                         return -ENOSPC;
2822                 rdev->raid_disk = slot;
2823                 /* assume it is working */
2824                 clear_bit(Faulty, &rdev->flags);
2825                 clear_bit(WriteMostly, &rdev->flags);
2826                 set_bit(In_sync, &rdev->flags);
2827                 sysfs_notify_dirent_safe(rdev->sysfs_state);
2828         }
2829         return len;
2830 }
2831
2832
2833 static struct rdev_sysfs_entry rdev_slot =
2834 __ATTR(slot, S_IRUGO|S_IWUSR, slot_show, slot_store);
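
/*
 * Illustrative usage (an editorial sketch; names are examples): assign an
 * inactive device to slot 2, or remove a failed/spare device from an
 * active array:
 *
 *   echo 2 > /sys/block/md0/md/dev-sdc1/slot
 *   echo none > /sys/block/md0/md/dev-sdc1/slot
 *
 * On an active array the "none" form only succeeds once the device has
 * actually been removed - see the -EBUSY paths in slot_store() above.
 */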
2835
2836 static ssize_t
2837 offset_show(struct md_rdev *rdev, char *page)
2838 {
2839         return sprintf(page, "%llu\n", (unsigned long long)rdev->data_offset);
2840 }
2841
2842 static ssize_t
2843 offset_store(struct md_rdev *rdev, const char *buf, size_t len)
2844 {
2845         unsigned long long offset;
2846         if (kstrtoull(buf, 10, &offset) < 0)
2847                 return -EINVAL;
2848         if (rdev->mddev->pers && rdev->raid_disk >= 0)
2849                 return -EBUSY;
2850         if (rdev->sectors && rdev->mddev->external)
2851                 /* Must set offset before size, so overlap checks
2852                  * can be sane */
2853                 return -EBUSY;
2854         rdev->data_offset = offset;
2855         rdev->new_data_offset = offset;
2856         return len;
2857 }
2858
2859 static struct rdev_sysfs_entry rdev_offset =
2860 __ATTR(offset, S_IRUGO|S_IWUSR, offset_show, offset_store);
2861
2862 static ssize_t new_offset_show(struct md_rdev *rdev, char *page)
2863 {
2864         return sprintf(page, "%llu\n",
2865                        (unsigned long long)rdev->new_data_offset);
2866 }
2867
2868 static ssize_t new_offset_store(struct md_rdev *rdev,
2869                                 const char *buf, size_t len)
2870 {
2871         unsigned long long new_offset;
2872         struct mddev *mddev = rdev->mddev;
2873
2874         if (kstrtoull(buf, 10, &new_offset) < 0)
2875                 return -EINVAL;
2876
2877         if (mddev->sync_thread)
2878                 return -EBUSY;
2879         if (new_offset == rdev->data_offset)
2880                 /* reset is always permitted */
2881                 ;
2882         else if (new_offset > rdev->data_offset) {
2883                 /* must not push array size beyond rdev_sectors */
2884                 if (new_offset - rdev->data_offset
2885                     + mddev->dev_sectors > rdev->sectors)
2886                         return -E2BIG;
2887         }
2888         /* Metadata worries about other space details. */
2889
2890         /* decreasing the offset is inconsistent with a backwards
2891          * reshape.
2892          */
2893         if (new_offset < rdev->data_offset &&
2894             mddev->reshape_backwards)
2895                 return -EINVAL;
2896         /* Increasing offset is inconsistent with forwards
2897          * reshape.  reshape_direction should be set to
2898          * 'backwards' first.
2899          */
2900         if (new_offset > rdev->data_offset &&
2901             !mddev->reshape_backwards)
2902                 return -EINVAL;
2903
2904         if (mddev->pers && mddev->persistent &&
2905             !super_types[mddev->major_version]
2906             .allow_new_offset(rdev, new_offset))
2907                 return -E2BIG;
2908         rdev->new_data_offset = new_offset;
2909         if (new_offset > rdev->data_offset)
2910                 mddev->reshape_backwards = 1;
2911         else if (new_offset < rdev->data_offset)
2912                 mddev->reshape_backwards = 0;
2913
2914         return len;
2915 }
2916 static struct rdev_sysfs_entry rdev_new_offset =
2917 __ATTR(new_offset, S_IRUGO|S_IWUSR, new_offset_show, new_offset_store);
2918
2919 static ssize_t
2920 rdev_size_show(struct md_rdev *rdev, char *page)
2921 {
2922         return sprintf(page, "%llu\n", (unsigned long long)rdev->sectors / 2);
2923 }
2924
2925 static int overlaps(sector_t s1, sector_t l1, sector_t s2, sector_t l2)
2926 {
2927         /* check if two start/length pairs overlap */
2928         if (s1+l1 <= s2)
2929                 return 0;
2930         if (s2+l2 <= s1)
2931                 return 0;
2932         return 1;
2933 }
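
/* Worked example for overlaps(): overlaps(0, 100, 100, 50) is 0 because
 * the first range ends exactly where the second begins (s1+l1 <= s2),
 * while overlaps(0, 100, 99, 50) is 1 since neither exclusion test holds.
 */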
2934
2935 static int strict_blocks_to_sectors(const char *buf, sector_t *sectors)
2936 {
2937         unsigned long long blocks;
2938         sector_t new;
2939
2940         if (kstrtoull(buf, 10, &blocks) < 0)
2941                 return -EINVAL;
2942
2943         if (blocks & 1ULL << (8 * sizeof(blocks) - 1))
2944                 return -EINVAL; /* sector conversion overflow */
2945
2946         new = blocks * 2;
2947         if (new != blocks * 2)
2948                 return -EINVAL; /* unsigned long long to sector_t overflow */
2949
2950         *sectors = new;
2951         return 0;
2952 }
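
/* Sizes written via sysfs are in 1K blocks while the kernel works in
 * 512-byte sectors, so e.g. "1024" becomes 2048 sectors.  The top-bit
 * test above rejects values whose doubling would overflow unsigned long
 * long; the "new != blocks * 2" test catches a narrower sector_t.
 */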
2953
2954 static ssize_t
2955 rdev_size_store(struct md_rdev *rdev, const char *buf, size_t len)
2956 {
2957         struct mddev *my_mddev = rdev->mddev;
2958         sector_t oldsectors = rdev->sectors;
2959         sector_t sectors;
2960
2961         if (strict_blocks_to_sectors(buf, &sectors) < 0)
2962                 return -EINVAL;
2963         if (rdev->data_offset != rdev->new_data_offset)
2964                 return -EINVAL; /* too confusing */
2965         if (my_mddev->pers && rdev->raid_disk >= 0) {
2966                 if (my_mddev->persistent) {
2967                         sectors = super_types[my_mddev->major_version].
2968                                 rdev_size_change(rdev, sectors);
2969                         if (!sectors)
2970                                 return -EBUSY;
2971                 } else if (!sectors)
2972                         sectors = (i_size_read(rdev->bdev->bd_inode) >> 9) -
2973                                 rdev->data_offset;
2974                 if (!my_mddev->pers->resize)
2975                         /* Cannot change size for RAID0 or Linear etc */
2976                         return -EINVAL;
2977         }
2978         if (sectors < my_mddev->dev_sectors)
2979                 return -EINVAL; /* component must fit device */
2980
2981         rdev->sectors = sectors;
2982         if (sectors > oldsectors && my_mddev->external) {
2983                 /* need to check that all other rdevs with the same ->bdev
2984                  * do not overlap.  We need to unlock the mddev to avoid
2985                  * a deadlock.  We have already changed rdev->sectors, and if
2986                  * we have to change it back, we will have the lock again.
2987                  */
2988                 struct mddev *mddev;
2989                 int overlap = 0;
2990                 struct list_head *tmp;
2991
2992                 mddev_unlock(my_mddev);
2993                 for_each_mddev(mddev, tmp) {
2994                         struct md_rdev *rdev2;
2995
2996                         mddev_lock_nointr(mddev);
2997                         rdev_for_each(rdev2, mddev)
2998                                 if (rdev->bdev == rdev2->bdev &&
2999                                     rdev != rdev2 &&
3000                                     overlaps(rdev->data_offset, rdev->sectors,
3001                                              rdev2->data_offset,
3002                                              rdev2->sectors)) {
3003                                         overlap = 1;
3004                                         break;
3005                                 }
3006                         mddev_unlock(mddev);
3007                         if (overlap) {
3008                                 mddev_put(mddev);
3009                                 break;
3010                         }
3011                 }
3012                 mddev_lock_nointr(my_mddev);
3013                 if (overlap) {
3014                         /* Someone else could have slipped in a size
3015                          * change here, but doing so is just silly.
3016                          * We put oldsectors back because we *know* it is
3017                          * safe, and trust userspace not to race with
3018                          * itself
3019                          */
3020                         rdev->sectors = oldsectors;
3021                         return -EBUSY;
3022                 }
3023         }
3024         return len;
3025 }
3026
3027 static struct rdev_sysfs_entry rdev_size =
3028 __ATTR(size, S_IRUGO|S_IWUSR, rdev_size_show, rdev_size_store);
3029
3030
3031 static ssize_t recovery_start_show(struct md_rdev *rdev, char *page)
3032 {
3033         unsigned long long recovery_start = rdev->recovery_offset;
3034
3035         if (test_bit(In_sync, &rdev->flags) ||
3036             recovery_start == MaxSector)
3037                 return sprintf(page, "none\n");
3038
3039         return sprintf(page, "%llu\n", recovery_start);
3040 }
3041
3042 static ssize_t recovery_start_store(struct md_rdev *rdev, const char *buf, size_t len)
3043 {
3044         unsigned long long recovery_start;
3045
3046         if (cmd_match(buf, "none"))
3047                 recovery_start = MaxSector;
3048         else if (kstrtoull(buf, 10, &recovery_start))
3049                 return -EINVAL;
3050
3051         if (rdev->mddev->pers &&
3052             rdev->raid_disk >= 0)
3053                 return -EBUSY;
3054
3055         rdev->recovery_offset = recovery_start;
3056         if (recovery_start == MaxSector)
3057                 set_bit(In_sync, &rdev->flags);
3058         else
3059                 clear_bit(In_sync, &rdev->flags);
3060         return len;
3061 }
3062
3063 static struct rdev_sysfs_entry rdev_recovery_start =
3064 __ATTR(recovery_start, S_IRUGO|S_IWUSR, recovery_start_show, recovery_start_store);
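
/*
 * Illustrative usage (an editorial sketch, mainly relevant to externally
 * managed metadata): record that the first 4096 sectors of a partially
 * recovered device are already in sync, or mark the whole device in sync:
 *
 *   echo 4096 > /sys/block/md0/md/dev-sdd1/recovery_start
 *   echo none > /sys/block/md0/md/dev-sdd1/recovery_start
 */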
3065
3066
3067 static ssize_t
3068 badblocks_show(struct badblocks *bb, char *page, int unack);
3069 static ssize_t
3070 badblocks_store(struct badblocks *bb, const char *page, size_t len, int unack);
3071
3072 static ssize_t bb_show(struct md_rdev *rdev, char *page)
3073 {
3074         return badblocks_show(&rdev->badblocks, page, 0);
3075 }
3076 static ssize_t bb_store(struct md_rdev *rdev, const char *page, size_t len)
3077 {
3078         int rv = badblocks_store(&rdev->badblocks, page, len, 0);
3079         /* Maybe that ack was all we needed */
3080         if (test_and_clear_bit(BlockedBadBlocks, &rdev->flags))
3081                 wake_up(&rdev->blocked_wait);
3082         return rv;
3083 }
3084 static struct rdev_sysfs_entry rdev_bad_blocks =
3085 __ATTR(bad_blocks, S_IRUGO|S_IWUSR, bb_show, bb_store);
3086
3087
3088 static ssize_t ubb_show(struct md_rdev *rdev, char *page)
3089 {
3090         return badblocks_show(&rdev->badblocks, page, 1);
3091 }
3092 static ssize_t ubb_store(struct md_rdev *rdev, const char *page, size_t len)
3093 {
3094         return badblocks_store(&rdev->badblocks, page, len, 1);
3095 }
3096 static struct rdev_sysfs_entry rdev_unack_bad_blocks =
3097 __ATTR(unacknowledged_bad_blocks, S_IRUGO|S_IWUSR, ubb_show, ubb_store);
3098
3099 static struct attribute *rdev_default_attrs[] = {
3100         &rdev_state.attr,
3101         &rdev_errors.attr,
3102         &rdev_slot.attr,
3103         &rdev_offset.attr,
3104         &rdev_new_offset.attr,
3105         &rdev_size.attr,
3106         &rdev_recovery_start.attr,
3107         &rdev_bad_blocks.attr,
3108         &rdev_unack_bad_blocks.attr,
3109         NULL,
3110 };
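
/* These attributes are instantiated once per member device; after the
 * rdev's kobject is added under the array (see bind_rdev_to_array()) they
 * appear as /sys/block/mdX/md/dev-YYY/{state,errors,slot,...}.
 */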
3111 static ssize_t
3112 rdev_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
3113 {
3114         struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr);
3115         struct md_rdev *rdev = container_of(kobj, struct md_rdev, kobj);
3116         struct mddev *mddev = rdev->mddev;
3117         ssize_t rv;
3118
3119         if (!entry->show)
3120                 return -EIO;
3121
3122         rv = mddev ? mddev_lock(mddev) : -EBUSY;
3123         if (!rv) {
3124                 if (rdev->mddev == NULL)
3125                         rv = -EBUSY;
3126                 else
3127                         rv = entry->show(rdev, page);
3128                 mddev_unlock(mddev);
3129         }
3130         return rv;
3131 }
3132
3133 static ssize_t
3134 rdev_attr_store(struct kobject *kobj, struct attribute *attr,
3135               const char *page, size_t length)
3136 {
3137         struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr);
3138         struct md_rdev *rdev = container_of(kobj, struct md_rdev, kobj);
3139         ssize_t rv;
3140         struct mddev *mddev = rdev->mddev;
3141
3142         if (!entry->store)
3143                 return -EIO;
3144         if (!capable(CAP_SYS_ADMIN))
3145                 return -EACCES;
3146         rv = mddev ? mddev_lock(mddev): -EBUSY;
3147         if (!rv) {
3148                 if (rdev->mddev == NULL)
3149                         rv = -EBUSY;
3150                 else
3151                         rv = entry->store(rdev, page, length);
3152                 mddev_unlock(mddev);
3153         }
3154         return rv;
3155 }
3156
3157 static void rdev_free(struct kobject *ko)
3158 {
3159         struct md_rdev *rdev = container_of(ko, struct md_rdev, kobj);
3160         kfree(rdev);
3161 }
3162 static const struct sysfs_ops rdev_sysfs_ops = {
3163         .show           = rdev_attr_show,
3164         .store          = rdev_attr_store,
3165 };
3166 static struct kobj_type rdev_ktype = {
3167         .release        = rdev_free,
3168         .sysfs_ops      = &rdev_sysfs_ops,
3169         .default_attrs  = rdev_default_attrs,
3170 };
3171
3172 int md_rdev_init(struct md_rdev *rdev)
3173 {
3174         rdev->desc_nr = -1;
3175         rdev->saved_raid_disk = -1;
3176         rdev->raid_disk = -1;
3177         rdev->flags = 0;
3178         rdev->data_offset = 0;
3179         rdev->new_data_offset = 0;
3180         rdev->sb_events = 0;
3181         rdev->last_read_error.tv_sec  = 0;
3182         rdev->last_read_error.tv_nsec = 0;
3183         rdev->sb_loaded = 0;
3184         rdev->bb_page = NULL;
3185         atomic_set(&rdev->nr_pending, 0);
3186         atomic_set(&rdev->read_errors, 0);
3187         atomic_set(&rdev->corrected_errors, 0);
3188
3189         INIT_LIST_HEAD(&rdev->same_set);
3190         init_waitqueue_head(&rdev->blocked_wait);
3191
3192         /* Add space to store the bad block list.
3193          * This reserves the space even on arrays where it cannot
3194          * be used - I wonder if that matters.
3195          */
3196         rdev->badblocks.count = 0;
3197         rdev->badblocks.shift = -1; /* disabled until explicitly enabled */
3198         rdev->badblocks.page = kmalloc(PAGE_SIZE, GFP_KERNEL);
3199         seqlock_init(&rdev->badblocks.lock);
3200         if (rdev->badblocks.page == NULL)
3201                 return -ENOMEM;
3202
3203         return 0;
3204 }
3205 EXPORT_SYMBOL_GPL(md_rdev_init);
3206 /*
3207  * Import a device.  If 'super_format' >= 0, then sanity-check the superblock.
3208  *
3209  * Mark the device faulty if:
3210  *
3211  *   - the device is nonexistent (zero size), or
3212  *   - the device has no valid superblock.
3213  *
3214  * A faulty rdev _never_ has rdev->sb set.
3215  */
3216 static struct md_rdev *md_import_device(dev_t newdev, int super_format, int super_minor)
3217 {
3218         char b[BDEVNAME_SIZE];
3219         int err;
3220         struct md_rdev *rdev;
3221         sector_t size;
3222
3223         rdev = kzalloc(sizeof(*rdev), GFP_KERNEL);
3224         if (!rdev) {
3225                 printk(KERN_ERR "md: could not alloc mem for new device!\n");
3226                 return ERR_PTR(-ENOMEM);
3227         }
3228
3229         err = md_rdev_init(rdev);
3230         if (err)
3231                 goto abort_free;
3232         err = alloc_disk_sb(rdev);
3233         if (err)
3234                 goto abort_free;
3235
3236         err = lock_rdev(rdev, newdev, super_format == -2);
3237         if (err)
3238                 goto abort_free;
3239
3240         kobject_init(&rdev->kobj, &rdev_ktype);
3241
3242         size = i_size_read(rdev->bdev->bd_inode) >> BLOCK_SIZE_BITS;
3243         if (!size) {
3244                 printk(KERN_WARNING
3245                         "md: %s has zero or unknown size, marking faulty!\n",
3246                         bdevname(rdev->bdev,b));
3247                 err = -EINVAL;
3248                 goto abort_free;
3249         }
3250
3251         if (super_format >= 0) {
3252                 err = super_types[super_format].
3253                         load_super(rdev, NULL, super_minor);
3254                 if (err == -EINVAL) {
3255                         printk(KERN_WARNING
3256                                 "md: %s does not have a valid v%d.%d "
3257                                "superblock, not importing!\n",
3258                                 bdevname(rdev->bdev,b),
3259                                super_format, super_minor);
3260                         goto abort_free;
3261                 }
3262                 if (err < 0) {
3263                         printk(KERN_WARNING
3264                                 "md: could not read %s's sb, not importing!\n",
3265                                 bdevname(rdev->bdev,b));
3266                         goto abort_free;
3267                 }
3268         }
3269
3270         return rdev;
3271
3272 abort_free:
3273         if (rdev->bdev)
3274                 unlock_rdev(rdev);
3275         md_rdev_clear(rdev);
3276         kfree(rdev);
3277         return ERR_PTR(err);
3278 }
3279
3280 /*
3281  * Check a full RAID array for plausibility
3282  */
3283
3284
3285 static void analyze_sbs(struct mddev *mddev)
3286 {
3287         int i;
3288         struct md_rdev *rdev, *freshest, *tmp;
3289         char b[BDEVNAME_SIZE];
3290
3291         freshest = NULL;
3292         rdev_for_each_safe(rdev, tmp, mddev)
3293                 switch (super_types[mddev->major_version].
3294                         load_super(rdev, freshest, mddev->minor_version)) {
3295                 case 1:
3296                         freshest = rdev;
3297                         break;
3298                 case 0:
3299                         break;
3300                 default:
3301                         printk(KERN_ERR
3302                                 "md: fatal superblock inconsistency in %s"
3303                                 " -- removing from array\n",
3304                                 bdevname(rdev->bdev,b));
3305                         kick_rdev_from_array(rdev);
3306                 }
3307
3308
3309         super_types[mddev->major_version].
3310                 validate_super(mddev, freshest);
3311
3312         i = 0;
3313         rdev_for_each_safe(rdev, tmp, mddev) {
3314                 if (mddev->max_disks &&
3315                     (rdev->desc_nr >= mddev->max_disks ||
3316                      i > mddev->max_disks)) {
3317                         printk(KERN_WARNING
3318                                "md: %s: %s: only %d devices permitted\n",
3319                                mdname(mddev), bdevname(rdev->bdev, b),
3320                                mddev->max_disks);
3321                         kick_rdev_from_array(rdev);
3322                         continue;
3323                 }
3324                 if (rdev != freshest)
3325                         if (super_types[mddev->major_version].
3326                             validate_super(mddev, rdev)) {
3327                                 printk(KERN_WARNING "md: kicking non-fresh %s"
3328                                         " from array!\n",
3329                                         bdevname(rdev->bdev,b));
3330                                 kick_rdev_from_array(rdev);
3331                                 continue;
3332                         }
3333                 if (mddev->level == LEVEL_MULTIPATH) {
3334                         rdev->desc_nr = i++;
3335                         rdev->raid_disk = rdev->desc_nr;
3336                         set_bit(In_sync, &rdev->flags);
3337                 } else if (rdev->raid_disk >= (mddev->raid_disks - min(0, mddev->delta_disks))) {
3338                         rdev->raid_disk = -1;
3339                         clear_bit(In_sync, &rdev->flags);
3340                 }
3341         }
3342 }
3343
3344 /* Read a fixed-point number.
3345  * Numbers in sysfs attributes should be in "standard" units where
3346  * possible, so time should be in seconds.
3347  * However, we internally use a much smaller unit such as
3348  * milliseconds or jiffies.
3349  * This function takes a decimal number with a possible fractional
3350  * component, and produces an integer which is the result of
3351  * multiplying that number by 10^'scale',
3352  * all without any floating-point arithmetic.
3353  */
3354 int strict_strtoul_scaled(const char *cp, unsigned long *res, int scale)
3355 {
3356         unsigned long result = 0;
3357         long decimals = -1;
3358         while (isdigit(*cp) || (*cp == '.' && decimals < 0)) {
3359                 if (*cp == '.')
3360                         decimals = 0;
3361                 else if (decimals < scale) {
3362                         unsigned int value;
3363                         value = *cp - '0';
3364                         result = result * 10 + value;
3365                         if (decimals >= 0)
3366                                 decimals++;
3367                 }
3368                 cp++;
3369         }
3370         if (*cp == '\n')
3371                 cp++;
3372         if (*cp)
3373                 return -EINVAL;
3374         if (decimals < 0)
3375                 decimals = 0;
3376         while (decimals < scale) {
3377                 result *= 10;
3378                 decimals++;
3379         }
3380         *res = result;
3381         return 0;
3382 }
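
/* Worked example: strict_strtoul_scaled("0.2", &res, 3) accumulates the
 * digits to 2 with one decimal place seen, then multiplies by 10 twice
 * more to reach scale 3, leaving res == 200 - i.e. 0.2 seconds expressed
 * in milliseconds.
 */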
3383
3384
3385 static void md_safemode_timeout(unsigned long data);
3386
3387 static ssize_t
3388 safe_delay_show(struct mddev *mddev, char *page)
3389 {
3390         int msec = (mddev->safemode_delay*1000)/HZ;
3391         return sprintf(page, "%d.%03d\n", msec/1000, msec%1000);
3392 }
3393 static ssize_t
3394 safe_delay_store(struct mddev *mddev, const char *cbuf, size_t len)
3395 {
3396         unsigned long msec;
3397
3398         if (strict_strtoul_scaled(cbuf, &msec, 3) < 0)
3399                 return -EINVAL;
3400         if (msec == 0)
3401                 mddev->safemode_delay = 0;
3402         else {
3403                 unsigned long old_delay = mddev->safemode_delay;
3404                 mddev->safemode_delay = (msec*HZ)/1000;
3405                 if (mddev->safemode_delay == 0)
3406                         mddev->safemode_delay = 1;
3407                 if (mddev->safemode_delay < old_delay || old_delay == 0)
3408                         md_safemode_timeout((unsigned long)mddev);
3409         }
3410         return len;
3411 }
3412 static struct md_sysfs_entry md_safe_delay =
3413 __ATTR(safe_mode_delay, S_IRUGO|S_IWUSR,safe_delay_show, safe_delay_store);
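
/* Illustrative usage: a 200 millisecond safemode delay can be requested
 * with a fractional value, parsed by strict_strtoul_scaled(..., 3) above:
 *
 *   echo 0.200 > /sys/block/md0/md/safe_mode_delay
 */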
3414
3415 static ssize_t
3416 level_show(struct mddev *mddev, char *page)
3417 {
3418         struct md_personality *p = mddev->pers;
3419         if (p)
3420                 return sprintf(page, "%s\n", p->name);
3421         else if (mddev->clevel[0])
3422                 return sprintf(page, "%s\n", mddev->clevel);
3423         else if (mddev->level != LEVEL_NONE)
3424                 return sprintf(page, "%d\n", mddev->level);
3425         else
3426                 return 0;
3427 }
3428
3429 static ssize_t
3430 level_store(struct mddev *mddev, const char *buf, size_t len)
3431 {
3432         char clevel[16];
3433         ssize_t rv = len;
3434         struct md_personality *pers;
3435         long level;
3436         void *priv;
3437         struct md_rdev *rdev;
3438
3439         if (mddev->pers == NULL) {
3440                 if (len == 0)
3441                         return 0;
3442                 if (len >= sizeof(mddev->clevel))
3443                         return -ENOSPC;
3444                 strncpy(mddev->clevel, buf, len);
3445                 if (mddev->clevel[len-1] == '\n')
3446                         len--;
3447                 mddev->clevel[len] = 0;
3448                 mddev->level = LEVEL_NONE;
3449                 return rv;
3450         }
3451
3452         /* Request to change the personality.  Need to ensure:
3453          *  - the array is not engaged in resync/recovery/reshape
3454          *  - the old personality can be suspended
3455          *  - the new personality will accept the array (via ->takeover()).
3456          */
3457
3458         if (mddev->sync_thread ||
3459             mddev->reshape_position != MaxSector ||
3460             mddev->sysfs_active)
3461                 return -EBUSY;
3462
3463         if (!mddev->pers->quiesce) {
3464                 printk(KERN_WARNING "md: %s: %s does not support online personality change\n",
3465                        mdname(mddev), mddev->pers->name);
3466                 return -EINVAL;
3467         }
3468
3469         /* Now find the new personality */
3470         if (len == 0 || len >= sizeof(clevel))
3471                 return -EINVAL;
3472         strncpy(clevel, buf, len);
3473         if (clevel[len-1] == '\n')
3474                 len--;
3475         clevel[len] = 0;
3476         if (kstrtol(clevel, 10, &level))
3477                 level = LEVEL_NONE;
3478
3479         if (request_module("md-%s", clevel) != 0)
3480                 request_module("md-level-%s", clevel);
3481         spin_lock(&pers_lock);
3482         pers = find_pers(level, clevel);
3483         if (!pers || !try_module_get(pers->owner)) {
3484                 spin_unlock(&pers_lock);
3485                 printk(KERN_WARNING "md: personality %s not loaded\n", clevel);
3486                 return -EINVAL;
3487         }
3488         spin_unlock(&pers_lock);
3489
3490         if (pers == mddev->pers) {
3491                 /* Nothing to do! */
3492                 module_put(pers->owner);
3493                 return rv;
3494         }
3495         if (!pers->takeover) {
3496                 module_put(pers->owner);
3497                 printk(KERN_WARNING "md: %s: %s does not support personality takeover\n",
3498                        mdname(mddev), clevel);
3499                 return -EINVAL;
3500         }
3501
3502         rdev_for_each(rdev, mddev)
3503                 rdev->new_raid_disk = rdev->raid_disk;
3504
3505         /* ->takeover must set new_* and/or delta_disks
3506          * if it succeeds, and may set them when it fails.
3507          */
3508         priv = pers->takeover(mddev);
3509         if (IS_ERR(priv)) {
3510                 mddev->new_level = mddev->level;
3511                 mddev->new_layout = mddev->layout;
3512                 mddev->new_chunk_sectors = mddev->chunk_sectors;
3513                 mddev->raid_disks -= mddev->delta_disks;
3514                 mddev->delta_disks = 0;
3515                 mddev->reshape_backwards = 0;
3516                 module_put(pers->owner);
3517                 printk(KERN_WARNING "md: %s: %s would not accept array\n",
3518                        mdname(mddev), clevel);
3519                 return PTR_ERR(priv);
3520         }
3521
3522         /* Looks like we have a winner */
3523         mddev_suspend(mddev);
3524         mddev->pers->stop(mddev);
3525
3526         if (mddev->pers->sync_request == NULL &&
3527             pers->sync_request != NULL) {
3528                 /* need to add the md_redundancy_group */
3529                 if (sysfs_create_group(&mddev->kobj, &md_redundancy_group))
3530                         printk(KERN_WARNING
3531                                "md: cannot register extra attributes for %s\n",
3532                                mdname(mddev));
3533                 mddev->sysfs_action = sysfs_get_dirent(mddev->kobj.sd, "sync_action");
3534         }
3535         if (mddev->pers->sync_request != NULL &&
3536             pers->sync_request == NULL) {
3537                 /* need to remove the md_redundancy_group */
3538                 if (mddev->to_remove == NULL)
3539                         mddev->to_remove = &md_redundancy_group;
3540         }
3541
3542         if (mddev->pers->sync_request == NULL &&
3543             mddev->external) {
3544                 /* We are converting from a no-redundancy array
3545                  * to a redundancy array, and metadata is managed
3546                  * externally, so we need to be sure that writes
3547                  * won't block due to a need to transition
3548                  *      clean->dirty
3549                  * until external management is started.
3550                  */
3551                 mddev->in_sync = 0;
3552                 mddev->safemode_delay = 0;
3553                 mddev->safemode = 0;
3554         }
3555
3556         rdev_for_each(rdev, mddev) {
3557                 if (rdev->raid_disk < 0)
3558                         continue;
3559                 if (rdev->new_raid_disk >= mddev->raid_disks)
3560                         rdev->new_raid_disk = -1;
3561                 if (rdev->new_raid_disk == rdev->raid_disk)
3562                         continue;
3563                 sysfs_unlink_rdev(mddev, rdev);
3564         }
3565         rdev_for_each(rdev, mddev) {
3566                 if (rdev->raid_disk < 0)
3567                         continue;
3568                 if (rdev->new_raid_disk == rdev->raid_disk)
3569                         continue;
3570                 rdev->raid_disk = rdev->new_raid_disk;
3571                 if (rdev->raid_disk < 0)
3572                         clear_bit(In_sync, &rdev->flags);
3573                 else {
3574                         if (sysfs_link_rdev(mddev, rdev))
3575                                 printk(KERN_WARNING "md: cannot register rd%d"
3576                                        " for %s after level change\n",
3577                                        rdev->raid_disk, mdname(mddev));
3578                 }
3579         }
3580
3581         module_put(mddev->pers->owner);
3582         mddev->pers = pers;
3583         mddev->private = priv;
3584         strlcpy(mddev->clevel, pers->name, sizeof(mddev->clevel));
3585         mddev->level = mddev->new_level;
3586         mddev->layout = mddev->new_layout;
3587         mddev->chunk_sectors = mddev->new_chunk_sectors;
3588         mddev->delta_disks = 0;
3589         mddev->reshape_backwards = 0;
3590         mddev->degraded = 0;
3591         if (mddev->pers->sync_request == NULL) {
3592                 /* this is now an array without redundancy, so
3593                  * it must always be in_sync
3594                  */
3595                 mddev->in_sync = 1;
3596                 del_timer_sync(&mddev->safemode_timer);
3597         }
3598         blk_set_stacking_limits(&mddev->queue->limits);
3599         pers->run(mddev);
3600         set_bit(MD_CHANGE_DEVS, &mddev->flags);
3601         mddev_resume(mddev);
3602         if (!mddev->thread)
3603                 md_update_sb(mddev, 1);
3604         sysfs_notify(&mddev->kobj, NULL, "level");
3605         md_new_event(mddev);
3606         return rv;
3607 }
3608
3609 static struct md_sysfs_entry md_level =
3610 __ATTR(level, S_IRUGO|S_IWUSR, level_show, level_store);
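
/*
 * Illustrative usage (an editorial sketch): convert a two-device raid1
 * to raid5 through the ->takeover() path in level_store() above:
 *
 *   echo raid5 > /sys/block/md0/md/level
 *
 * This succeeds only if the target personality is loadable (the
 * "md-raid5" module alias) and the array is not busy with
 * resync/recovery/reshape.
 */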
3611
3612
3613 static ssize_t
3614 layout_show(struct mddev *mddev, char *page)
3615 {
3616         /* just a number, not meaningful for all levels */
3617         if (mddev->reshape_position != MaxSector &&
3618             mddev->layout != mddev->new_layout)
3619                 return sprintf(page, "%d (%d)\n",
3620                                mddev->new_layout, mddev->layout);
3621         return sprintf(page, "%d\n", mddev->layout);
3622 }
3623
3624 static ssize_t
3625 layout_store(struct mddev *mddev, const char *buf, size_t len)
3626 {
3627         char *e;
3628         unsigned long n = simple_strtoul(buf, &e, 10);
3629
3630         if (!*buf || (*e && *e != '\n'))
3631                 return -EINVAL;
3632
3633         if (mddev->pers) {
3634                 int err;
3635                 if (mddev->pers->check_reshape == NULL)
3636                         return -EBUSY;
3637                 mddev->new_layout = n;
3638                 err = mddev->pers->check_reshape(mddev);
3639                 if (err) {
3640                         mddev->new_layout = mddev->layout;
3641                         return err;
3642                 }
3643         } else {
3644                 mddev->new_layout = n;
3645                 if (mddev->reshape_position == MaxSector)
3646                         mddev->layout = n;
3647         }
3648         return len;
3649 }
3650 static struct md_sysfs_entry md_layout =
3651 __ATTR(layout, S_IRUGO|S_IWUSR, layout_show, layout_store);
3652
3653
3654 static ssize_t
3655 raid_disks_show(struct mddev *mddev, char *page)
3656 {
3657         if (mddev->raid_disks == 0)
3658                 return 0;
3659         if (mddev->reshape_position != MaxSector &&
3660             mddev->delta_disks != 0)
3661                 return sprintf(page, "%d (%d)\n", mddev->raid_disks,
3662                                mddev->raid_disks - mddev->delta_disks);
3663         return sprintf(page, "%d\n", mddev->raid_disks);
3664 }
3665
3666 static int update_raid_disks(struct mddev *mddev, int raid_disks);
3667
3668 static ssize_t
3669 raid_disks_store(struct mddev *mddev, const char *buf, size_t len)
3670 {
3671         char *e;
3672         int rv = 0;
3673         unsigned long n = simple_strtoul(buf, &e, 10);
3674
3675         if (!*buf || (*e && *e != '\n'))
3676                 return -EINVAL;
3677
3678         if (mddev->pers)
3679                 rv = update_raid_disks(mddev, n);
3680         else if (mddev->reshape_position != MaxSector) {
3681                 struct md_rdev *rdev;
3682                 int olddisks = mddev->raid_disks - mddev->delta_disks;
3683
3684                 rdev_for_each(rdev, mddev) {
3685                         if (olddisks < n &&
3686                             rdev->data_offset < rdev->new_data_offset)
3687                                 return -EINVAL;
3688                         if (olddisks > n &&
3689                             rdev->data_offset > rdev->new_data_offset)
3690                                 return -EINVAL;
3691                 }
3692                 mddev->delta_disks = n - olddisks;
3693                 mddev->raid_disks = n;
3694                 mddev->reshape_backwards = (mddev->delta_disks < 0);
3695         } else
3696                 mddev->raid_disks = n;
3697         return rv ? rv : len;
3698 }
3699 static struct md_sysfs_entry md_raid_disks =
3700 __ATTR(raid_disks, S_IRUGO|S_IWUSR, raid_disks_show, raid_disks_store);
3701
3702 static ssize_t
3703 chunk_size_show(struct mddev *mddev, char *page)
3704 {
3705         if (mddev->reshape_position != MaxSector &&
3706             mddev->chunk_sectors != mddev->new_chunk_sectors)
3707                 return sprintf(page, "%d (%d)\n",
3708                                mddev->new_chunk_sectors << 9,
3709                                mddev->chunk_sectors << 9);
3710         return sprintf(page, "%d\n", mddev->chunk_sectors << 9);
3711 }
3712
3713 static ssize_t
3714 chunk_size_store(struct mddev *mddev, const char *buf, size_t len)
3715 {
3716         char *e;
3717         unsigned long n = simple_strtoul(buf, &e, 10);
3718
3719         if (!*buf || (*e && *e != '\n'))
3720                 return -EINVAL;
3721
3722         if (mddev->pers) {
3723                 int err;
3724                 if (mddev->pers->check_reshape == NULL)
3725                         return -EBUSY;
3726                 mddev->new_chunk_sectors = n >> 9;
3727                 err = mddev->pers->check_reshape(mddev);
3728                 if (err) {
3729                         mddev->new_chunk_sectors = mddev->chunk_sectors;
3730                         return err;
3731                 }
3732         } else {
3733                 mddev->new_chunk_sectors = n >> 9;
3734                 if (mddev->reshape_position == MaxSector)
3735                         mddev->chunk_sectors = n >> 9;
3736         }
3737         return len;
3738 }
3739 static struct md_sysfs_entry md_chunk_size =
3740 __ATTR(chunk_size, S_IRUGO|S_IWUSR, chunk_size_show, chunk_size_store);
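
/* Illustrative usage: the value is given in bytes and converted to
 * sectors (n >> 9) above, so 512 KiB chunks are requested with:
 *
 *   echo 524288 > /sys/block/md0/md/chunk_size
 */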
3741
3742 static ssize_t
3743 resync_start_show(struct mddev *mddev, char *page)
3744 {
3745         if (mddev->recovery_cp == MaxSector)
3746                 return sprintf(page, "none\n");
3747         return sprintf(page, "%llu\n", (unsigned long long)mddev->recovery_cp);
3748 }
3749
3750 static ssize_t
3751 resync_start_store(struct mddev *mddev, const char *buf, size_t len)
3752 {
3753         char *e;
3754         unsigned long long n = simple_strtoull(buf, &e, 10);
3755
3756         if (mddev->pers && !test_bit(MD_RECOVERY_FROZEN, &mddev->recovery))
3757                 return -EBUSY;
3758         if (cmd_match(buf, "none"))
3759                 n = MaxSector;
3760         else if (!*buf || (*e && *e != '\n'))
3761                 return -EINVAL;
3762
3763         mddev->recovery_cp = n;
3764         if (mddev->pers)
3765                 set_bit(MD_CHANGE_CLEAN, &mddev->flags);
3766         return len;
3767 }
3768 static struct md_sysfs_entry md_resync_start =
3769 __ATTR(resync_start, S_IRUGO|S_IWUSR, resync_start_show, resync_start_store);
3770
3771 /*
3772  * The array state can be:
3773  *
3774  * clear
3775  *     No devices, no size, no level
3776  *     Equivalent to STOP_ARRAY ioctl
3777  * inactive
3778  *     May have some settings, but the array is not active;
3779  *        all IO results in error.
3780  *     When written, doesn't tear down the array, but just stops it.
3781  * suspended (not supported yet)
3782  *     All IO requests will block.  The array can be reconfigured.
3783  *     Writing this, if accepted, will block until the array is quiescent.
3784  * readonly
3785  *     No resync can happen.  No superblocks get written.
3786  *     Write requests fail.
3787  * read-auto
3788  *     Like readonly, but behaves like 'clean' on a write request.
3789  *
3790  * clean - no pending writes, but otherwise active.
3791  *     When written to an inactive array, starts without resync.
3792  *     If a write request arrives then:
3793  *       if metadata is known, mark 'dirty' and switch to 'active';
3794  *       if not known, block and switch to write-pending.
3795  *     If written to an active array that has pending writes, it fails.
3796  * active
3797  *     Fully active: IO and resync can be happening.
3798  *     When written to an inactive array, starts with resync.
3799  *
3800  * write-pending
3801  *     Clean, but writes are blocked waiting for 'active' to be written.
3802  *
3803  * active-idle
3804  *     Like active, but no writes have been seen for the safemode delay.
3805  *
3806  */
3807 enum array_state { clear, inactive, suspended, readonly, read_auto, clean, active,
3808                    write_pending, active_idle, bad_word};
3809 static char *array_states[] = {
3810         "clear", "inactive", "suspended", "readonly", "read-auto", "clean", "active",
3811         "write-pending", "active-idle", NULL };
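
/*
 * Illustrative transitions (an editorial sketch): an assembled but
 * inactive array can be started read-only and later made fully writable:
 *
 *   echo readonly > /sys/block/md0/md/array_state
 *   echo active > /sys/block/md0/md/array_state
 *
 * array_state_store() below shows exactly which transitions each written
 * word triggers.
 */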
3812
3813 static int match_word(const char *word, char **list)
3814 {
3815         int n;
3816         for (n=0; list[n]; n++)
3817                 if (cmd_match(word, list[n]))
3818                         break;
3819         return n;
3820 }
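
/* e.g. match_word("read-auto\n", array_states) returns 4 (read_auto);
 * cmd_match() tolerates the trailing newline, and an unmatched word runs
 * off the end of the list, returning the index of the NULL sentinel,
 * i.e. bad_word.
 */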
3821
3822 static ssize_t
3823 array_state_show(struct mddev *mddev, char *page)
3824 {
3825         enum array_state st = inactive;
3826
3827         if (mddev->pers)
3828                 switch(mddev->ro) {
3829                 case 1:
3830                         st = readonly;
3831                         break;
3832                 case 2:
3833                         st = read_auto;
3834                         break;
3835                 case 0:
3836                         if (mddev->in_sync)
3837                                 st = clean;
3838                         else if (test_bit(MD_CHANGE_PENDING, &mddev->flags))
3839                                 st = write_pending;
3840                         else if (mddev->safemode)
3841                                 st = active_idle;
3842                         else
3843                                 st = active;
3844                 }
3845         else {
3846                 if (list_empty(&mddev->disks) &&
3847                     mddev->raid_disks == 0 &&
3848                     mddev->dev_sectors == 0)
3849                         st = clear;
3850                 else
3851                         st = inactive;
3852         }
3853         return sprintf(page, "%s\n", array_states[st]);
3854 }
3855
3856 static int do_md_stop(struct mddev *mddev, int ro, struct block_device *bdev);
3857 static int md_set_readonly(struct mddev *mddev, struct block_device *bdev);
3858 static int do_md_run(struct mddev *mddev);
3859 static int restart_array(struct mddev *mddev);
3860
3861 static ssize_t
3862 array_state_store(struct mddev *mddev, const char *buf, size_t len)
3863 {
3864         int err = -EINVAL;
3865         enum array_state st = match_word(buf, array_states);
3866         switch(st) {
3867         case bad_word:
3868                 break;
3869         case clear:
3870                 /* stopping an active array */
3871                 err = do_md_stop(mddev, 0, NULL);
3872                 break;
3873         case inactive:
3874                 /* stop an active array but do not disassemble it */
3875                 if (mddev->pers)
3876                         err = do_md_stop(mddev, 2, NULL);
3877                 else
3878                         err = 0; /* already inactive */
3879                 break;
3880         case suspended:
3881                 break; /* not supported yet */
3882         case readonly:
3883                 if (mddev->pers)
3884                         err = md_set_readonly(mddev, NULL);
3885                 else {
3886                         mddev->ro = 1;
3887                         set_disk_ro(mddev->gendisk, 1);
3888                         err = do_md_run(mddev);
3889                 }
3890                 break;
3891         case read_auto:
3892                 if (mddev->pers) {
3893                         if (mddev->ro == 0)
3894                                 err = md_set_readonly(mddev, NULL);
3895                         else if (mddev->ro == 1)
3896                                 err = restart_array(mddev);
3897                         if (err == 0) {
3898                                 mddev->ro = 2;
3899                                 set_disk_ro(mddev->gendisk, 0);
3900                         }
3901                 } else {
3902                         mddev->ro = 2;
3903                         err = do_md_run(mddev);
3904                 }
3905                 break;
3906         case clean:
3907                 if (mddev->pers) {
3908                         restart_array(mddev);
3909                         spin_lock_irq(&mddev->write_lock);
3910                         if (atomic_read(&mddev->writes_pending) == 0) {
3911                                 if (mddev->in_sync == 0) {
3912                                         mddev->in_sync = 1;
3913                                         if (mddev->safemode == 1)
3914                                                 mddev->safemode = 0;
3915                                         set_bit(MD_CHANGE_CLEAN, &mddev->flags);
3916                                 }
3917                                 err = 0;
3918                         } else
3919                                 err = -EBUSY;
3920                         spin_unlock_irq(&mddev->write_lock);
3921                 } else
3922                         err = -EINVAL;
3923                 break;
3924         case active:
3925                 if (mddev->pers) {
3926                         restart_array(mddev);
3927                         clear_bit(MD_CHANGE_PENDING, &mddev->flags);
3928                         wake_up(&mddev->sb_wait);
3929                         err = 0;
3930                 } else {
3931                         mddev->ro = 0;
3932                         set_disk_ro(mddev->gendisk, 0);
3933                         err = do_md_run(mddev);
3934                 }
3935                 break;
3936         case write_pending:
3937         case active_idle:
3938                 /* these cannot be set */
3939                 break;
3940         }
3941         if (err)
3942                 return err;
3943         else {
3944                 if (mddev->hold_active == UNTIL_IOCTL)
3945                         mddev->hold_active = 0;
3946                 sysfs_notify_dirent_safe(mddev->sysfs_state);
3947                 return len;
3948         }
3949 }
3950 static struct md_sysfs_entry md_array_state =
3951 __ATTR(array_state, S_IRUGO|S_IWUSR, array_state_show, array_state_store);
3952
3953 static ssize_t
3954 max_corrected_read_errors_show(struct mddev *mddev, char *page) {
3955         return sprintf(page, "%d\n",
3956                        atomic_read(&mddev->max_corr_read_errors));
3957 }
3958
3959 static ssize_t
3960 max_corrected_read_errors_store(struct mddev *mddev, const char *buf, size_t len)
3961 {
3962         char *e;
3963         unsigned long n = simple_strtoul(buf, &e, 10);
3964
3965         if (*buf && (*e == 0 || *e == '\n')) {
3966                 atomic_set(&mddev->max_corr_read_errors, n);
3967                 return len;
3968         }
3969         return -EINVAL;
3970 }
3971
3972 static struct md_sysfs_entry max_corr_read_errors =
3973 __ATTR(max_read_errors, S_IRUGO|S_IWUSR, max_corrected_read_errors_show,
3974         max_corrected_read_errors_store);
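
/* Illustrative usage: raise the per-device threshold of corrected read
 * errors to 50:
 *
 *   echo 50 > /sys/block/md0/md/max_read_errors
 */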
3975
3976 static ssize_t
3977 null_show(struct mddev *mddev, char *page)
3978 {
3979         return -EINVAL;
3980 }
3981
3982 static ssize_t
3983 new_dev_store(struct mddev *mddev, const char *buf, size_t len)
3984 {
3985         /* buf must be "major:minor" (optionally newline-terminated) */
3986         /* The new device is added to the array.
3987          * If the array has a persistent superblock, we read the
3988          * superblock to initialise info and check validity.
3989          * Otherwise, only checking done is that in bind_rdev_to_array,
3990          * which mainly checks size.
3991          */
3992         char *e;
3993         int major = simple_strtoul(buf, &e, 10);
3994         int minor;
3995         dev_t dev;
3996         struct md_rdev *rdev;
3997         int err;
3998
3999         if (!*buf || *e != ':' || !e[1] || e[1] == '\n')
4000                 return -EINVAL;
4001         minor = simple_strtoul(e+1, &e, 10);
4002         if (*e && *e != '\n')
4003                 return -EINVAL;
4004         dev = MKDEV(major, minor);
4005         if (major != MAJOR(dev) ||
4006             minor != MINOR(dev))
4007                 return -EOVERFLOW;
4008
4009
4010         if (mddev->persistent) {
4011                 rdev = md_import_device(dev, mddev->major_version,
4012                                         mddev->minor_version);
4013                 if (!IS_ERR(rdev) && !list_empty(&mddev->disks)) {
4014                         struct md_rdev *rdev0
4015                                 = list_entry(mddev->disks.next,
4016                                              struct md_rdev, same_set);
4017                         err = super_types[mddev->major_version]
4018                                 .load_super(rdev, rdev0, mddev->minor_version);
4019                         if (err < 0)
4020                                 goto out;
4021                 }
4022         } else if (mddev->external)
4023                 rdev = md_import_device(dev, -2, -1);
4024         else
4025                 rdev = md_import_device(dev, -1, -1);
4026
4027         if (IS_ERR(rdev))
4028                 return PTR_ERR(rdev);
4029         err = bind_rdev_to_array(rdev, mddev);
4030  out:
4031         if (err)
4032                 export_rdev(rdev);
4033         return err ? err : len;
4034 }
4035
4036 static struct md_sysfs_entry md_new_device =
4037 __ATTR(new_dev, S_IWUSR, null_show, new_dev_store);
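
/*
 * Illustrative usage (an editorial sketch): hand the array a component
 * device by major:minor number - e.g. 8:17, which is /dev/sdb1 on
 * typical systems:
 *
 *   echo 8:17 > /sys/block/md0/md/new_dev
 */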
4038
4039 static ssize_t
4040 bitmap_store(struct mddev *mddev, const char *buf, size_t len)
4041 {
4042         char *end;
4043         unsigned long chunk, end_chunk;
4044
4045         if (!mddev->bitmap)
4046                 goto out;
4047         /* buf should be <chunk> <chunk> ... or <chunk>-<chunk> ... (range) */
4048         while (*buf) {
4049                 chunk = end_chunk = simple_strtoul(buf, &end, 0);
4050                 if (buf == end) break;
4051                 if (*end == '-') { /* range */
4052                         buf = end + 1;
4053                         end_chunk = simple_strtoul(buf, &end, 0);
4054                         if (buf == end) break;
4055                 }
4056                 if (*end && !isspace(*end)) break;
4057                 bitmap_dirty_bits(mddev->bitmap, chunk, end_chunk);
4058                 buf = skip_spaces(end);
4059         }
4060         bitmap_unplug(mddev->bitmap); /* flush the bits to disk */
4061 out:
4062         return len;
4063 }
4064
4065 static struct md_sysfs_entry md_bitmap =
4066 __ATTR(bitmap_set_bits, S_IWUSR, null_show, bitmap_store);
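
/* Illustrative usage: mark bitmap chunks 100 through 200 dirty so the
 * corresponding regions will be resynced, e.g. when an external metadata
 * manager finds them possibly stale:
 *
 *   echo 100-200 > /sys/block/md0/md/bitmap_set_bits
 */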
4067
4068 static ssize_t
4069 size_show(struct mddev *mddev, char *page)
4070 {
4071         return sprintf(page, "%llu\n",
4072                 (unsigned long long)mddev->dev_sectors / 2);
4073 }
4074
4075 static int update_size(struct mddev *mddev, sector_t num_sectors);
4076
4077 static ssize_t
4078 size_store(struct mddev *mddev, const char *buf, size_t len)
4079 {
4080         /* If array is inactive, we can reduce the component size, but
4081          * not increase it (except from 0).
4082          * If array is active, we can try an on-line resize
4083          */
4084         sector_t sectors;
4085         int err = strict_blocks_to_sectors(buf, &sectors);
4086
4087         if (err < 0)
4088                 return err;
4089         if (mddev->pers) {
4090                 err = update_size(mddev, sectors);
4091                 md_update_sb(mddev, 1);
4092         } else {
4093                 if (mddev->dev_sectors == 0 ||
4094                     mddev->dev_sectors > sectors)
4095                         mddev->dev_sectors = sectors;
4096                 else
4097                         err = -ENOSPC;
4098         }
4099         return err ? err : len;
4100 }
4101
4102 static struct md_sysfs_entry md_size =
4103 __ATTR(component_size, S_IRUGO|S_IWUSR, size_show, size_store);
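
/* Illustrative usage: the value is in 1K blocks, so each component of an
 * inactive array can be limited to 1 GiB with:
 *
 *   echo 1048576 > /sys/block/md0/md/component_size
 */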
4104
4105
4106 /* Metadata version.
4107  * This is one of
4108  *   'none' for arrays with no metadata (good luck...)
4109  *   'external' for arrays with externally managed metadata,
4110  * or N.M for internally known formats
4111  */
4112 static ssize_t
4113 metadata_show(struct mddev *mddev, char *page)
4114 {
4115         if (mddev->persistent)
4116                 return sprintf(page, "%d.%d\n",
4117                                mddev->major_version, mddev->minor_version);
4118         else if (mddev->external)
4119                 return sprintf(page, "external:%s\n", mddev->metadata_type);
4120         else
4121                 return sprintf(page, "none\n");
4122 }
4123
4124 static ssize_t
4125 metadata_store(struct mddev *mddev, const char *buf, size_t len)
4126 {
4127         int major, minor;
4128         char *e;
4129         /* Changing the details of 'external' metadata is
4130          * always permitted.  Otherwise there must be
4131          * no devices attached to the array.
4132          */
4133         if (mddev->external && strncmp(buf, "external:", 9) == 0)
4134                 ;
4135         else if (!list_empty(&mddev->disks))
4136                 return -EBUSY;
4137
4138         if (cmd_match(buf, "none")) {
4139                 mddev->persistent = 0;
4140                 mddev->external = 0;
4141                 mddev->major_version = 0;
4142                 mddev->minor_version = 90;
4143                 return len;
4144         }
4145         if (strncmp(buf, "external:", 9) == 0) {
4146                 size_t namelen = len-9;
4147                 if (namelen >= sizeof(mddev->metadata_type))
4148                         namelen = sizeof(mddev->metadata_type)-1;
4149                 strncpy(mddev->metadata_type, buf+9, namelen);
4150                 mddev->metadata_type[namelen] = 0;
4151                 if (namelen && mddev->metadata_type[namelen-1] == '\n')
4152                         mddev->metadata_type[--namelen] = 0;
4153                 mddev->persistent = 0;
4154                 mddev->external = 1;
4155                 mddev->major_version = 0;
4156                 mddev->minor_version = 90;
4157                 return len;
4158         }
4159         major = simple_strtoul(buf, &e, 10);
4160         if (e==buf || *e != '.')
4161                 return -EINVAL;
4162         buf = e+1;
4163         minor = simple_strtoul(buf, &e, 10);
4164         if (e==buf || (*e && *e != '\n'))
4165                 return -EINVAL;
4166         if (major >= ARRAY_SIZE(super_types) || super_types[major].name == NULL)
4167                 return -ENOENT;
4168         mddev->major_version = major;
4169         mddev->minor_version = minor;
4170         mddev->persistent = 1;
4171         mddev->external = 0;
4172         return len;
4173 }
4174
4175 static struct md_sysfs_entry md_metadata =
4176 __ATTR(metadata_version, S_IRUGO|S_IWUSR, metadata_show, metadata_store);
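
/*
 * Illustrative usage (an editorial sketch; "imsm" is just an example
 * type name - the text after "external:" is copied, length-limited, into
 * mddev->metadata_type):
 *
 *   echo 1.2 > /sys/block/md0/md/metadata_version
 *   echo external:imsm > /sys/block/md0/md/metadata_version
 */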
4177
4178 static ssize_t
4179 action_show(struct mddev *mddev, char *page)
4180 {
4181         char *type = "idle";
4182         if (test_bit(MD_RECOVERY_FROZEN, &mddev->recovery))
4183                 type = "frozen";
4184         else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
4185             (!mddev->ro && test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))) {
4186                 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
4187                         type = "reshape";
4188                 else if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
4189                         if (!test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
4190                                 type = "resync";
4191                         else if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery))
4192                                 type = "check";
4193                         else
4194                                 type = "repair";
4195                 } else if (test_bit(MD_RECOVERY_RECOVER, &mddev->recovery))
4196                         type = "recover";
4197         }
4198         return sprintf(page, "%s\n", type);
4199 }
4200
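     /* Writing one of the strings above requests the matching operation;
      * "idle" interrupts a running sync thread, and "frozen" also keeps
      * a new one from starting.  For example (md0 path assumed):
      *
      *   echo check > /sys/block/md0/md/sync_action
      */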
4201 static ssize_t
4202 action_store(struct mddev *mddev, const char *page, size_t len)
4203 {
4204         if (!mddev->pers || !mddev->pers->sync_request)
4205                 return -EINVAL;
4206
4207         if (cmd_match(page, "frozen"))
4208                 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
4209         else
4210                 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
4211
4212         if (cmd_match(page, "idle") || cmd_match(page, "frozen")) {
4213                 if (mddev->sync_thread) {
4214                         set_bit(MD_RECOVERY_INTR, &mddev->recovery);
4215                         md_reap_sync_thread(mddev);
4216                 }
4217         } else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
4218                    test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
4219                 return -EBUSY;
4220         else if (cmd_match(page, "resync"))
4221                 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
4222         else if (cmd_match(page, "recover")) {
4223                 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
4224                 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
4225         } else if (cmd_match(page, "reshape")) {
4226                 int err;
4227                 if (mddev->pers->start_reshape == NULL)
4228                         return -EINVAL;
4229                 err = mddev->pers->start_reshape(mddev);
4230                 if (err)
4231                         return err;
4232                 sysfs_notify(&mddev->kobj, NULL, "degraded");
4233         } else {
4234                 if (cmd_match(page, "check"))
4235                         set_bit(MD_RECOVERY_CHECK, &mddev->recovery);
4236                 else if (!cmd_match(page, "repair"))
4237                         return -EINVAL;
4238                 set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
4239                 set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
4240         }
4241         if (mddev->ro == 2) {
4242                 /* A write to sync_action is enough to justify
4243                  * canceling read-auto mode
4244                  */
4245                 mddev->ro = 0;
4246                 md_wakeup_thread(mddev->sync_thread);
4247         }
4248         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
4249         md_wakeup_thread(mddev->thread);
4250         sysfs_notify_dirent_safe(mddev->sysfs_action);
4251         return len;
4252 }
4253
4254 static struct md_sysfs_entry md_scan_mode =
4255 __ATTR(sync_action, S_IRUGO|S_IWUSR, action_show, action_store);
4256
4257 static ssize_t
4258 last_sync_action_show(struct mddev *mddev, char *page)
4259 {
4260         return sprintf(page, "%s\n", mddev->last_sync_action);
4261 }
4262
4263 static struct md_sysfs_entry md_last_scan_mode = __ATTR_RO(last_sync_action);
4264
4265 static ssize_t
4266 mismatch_cnt_show(struct mddev *mddev, char *page)
4267 {
4268         return sprintf(page, "%llu\n",
4269                        (unsigned long long)
4270                        atomic64_read(&mddev->resync_mismatches));
4271 }
4272
4273 static struct md_sysfs_entry md_mismatches = __ATTR_RO(mismatch_cnt);
4274
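     /* sync_speed_min/sync_speed_max set per-array resync speed limits
      * in KB/sec; writing "system" reverts to the system-wide
      * speed_limit_min/speed_limit_max defaults.
      */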
4275 static ssize_t
4276 sync_min_show(struct mddev *mddev, char *page)
4277 {
4278         return sprintf(page, "%d (%s)\n", speed_min(mddev),
4279                        mddev->sync_speed_min ? "local": "system");
4280 }
4281
4282 static ssize_t
4283 sync_min_store(struct mddev *mddev, const char *buf, size_t len)
4284 {
4285         int min;
4286         char *e;
4287         if (strncmp(buf, "system", 6)==0) {
4288                 mddev->sync_speed_min = 0;
4289                 return len;
4290         }
4291         min = simple_strtoul(buf, &e, 10);
4292         if (buf == e || (*e && *e != '\n') || min <= 0)
4293                 return -EINVAL;
4294         mddev->sync_speed_min = min;
4295         return len;
4296 }
4297
4298 static struct md_sysfs_entry md_sync_min =
4299 __ATTR(sync_speed_min, S_IRUGO|S_IWUSR, sync_min_show, sync_min_store);
4300
4301 static ssize_t
4302 sync_max_show(struct mddev *mddev, char *page)
4303 {
4304         return sprintf(page, "%d (%s)\n", speed_max(mddev),
4305                        mddev->sync_speed_max ? "local": "system");
4306 }
4307
4308 static ssize_t
4309 sync_max_store(struct mddev *mddev, const char *buf, size_t len)
4310 {
4311         int max;
4312         char *e;
4313         if (strncmp(buf, "system", 6)==0) {
4314                 mddev->sync_speed_max = 0;
4315                 return len;
4316         }
4317         max = simple_strtoul(buf, &e, 10);
4318         if (buf == e || (*e && *e != '\n') || max <= 0)
4319                 return -EINVAL;
4320         mddev->sync_speed_max = max;
4321         return len;
4322 }
4323
4324 static struct md_sysfs_entry md_sync_max =
4325 __ATTR(sync_speed_max, S_IRUGO|S_IWUSR, sync_max_show, sync_max_store);
4326
4327 static ssize_t
4328 degraded_show(struct mddev *mddev, char *page)
4329 {
4330         return sprintf(page, "%d\n", mddev->degraded);
4331 }
4332 static struct md_sysfs_entry md_degraded = __ATTR_RO(degraded);
4333
4334 static ssize_t
4335 sync_force_parallel_show(struct mddev *mddev, char *page)
4336 {
4337         return sprintf(page, "%d\n", mddev->parallel_resync);
4338 }
4339
4340 static ssize_t
4341 sync_force_parallel_store(struct mddev *mddev, const char *buf, size_t len)
4342 {
4343         long n;
4344
4345         if (kstrtol(buf, 10, &n))
4346                 return -EINVAL;
4347
4348         if (n != 0 && n != 1)
4349                 return -EINVAL;
4350
4351         mddev->parallel_resync = n;
4352
4353         if (mddev->sync_thread)
4354                 wake_up(&resync_wait);
4355
4356         return len;
4357 }
4358
4359 /* force parallel resync, even with shared block devices */
4360 static struct md_sysfs_entry md_sync_force_parallel =
4361 __ATTR(sync_force_parallel, S_IRUGO|S_IWUSR,
4362        sync_force_parallel_show, sync_force_parallel_store);
4363
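     /* Recent resync throughput: sectors completed since the last mark,
      * divided by the elapsed seconds; the final /2 converts 512-byte
      * sectors to KB/sec.
      */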
4364 static ssize_t
4365 sync_speed_show(struct mddev *mddev, char *page)
4366 {
4367         unsigned long resync, dt, db;
4368         if (mddev->curr_resync == 0)
4369                 return sprintf(page, "none\n");
4370         resync = mddev->curr_mark_cnt - atomic_read(&mddev->recovery_active);
4371         dt = (jiffies - mddev->resync_mark) / HZ;
4372         if (!dt) dt++;
4373         db = resync - mddev->resync_mark_cnt;
4374         return sprintf(page, "%lu\n", db/dt/2); /* K/sec */
4375 }
4376
4377 static struct md_sysfs_entry md_sync_speed = __ATTR_RO(sync_speed);
4378
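     /* Progress is reported as "<done> / <total>" in sectors, "none"
      * when no recovery is running, or "delayed" while this array is
      * waiting its turn.
      */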
4379 static ssize_t
4380 sync_completed_show(struct mddev *mddev, char *page)
4381 {
4382         unsigned long long max_sectors, resync;
4383
4384         if (!test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
4385                 return sprintf(page, "none\n");
4386
4387         if (mddev->curr_resync == 1 ||
4388             mddev->curr_resync == 2)
4389                 return sprintf(page, "delayed\n");
4390
4391         if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ||
4392             test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
4393                 max_sectors = mddev->resync_max_sectors;
4394         else
4395                 max_sectors = mddev->dev_sectors;
4396
4397         resync = mddev->curr_resync_completed;
4398         return sprintf(page, "%llu / %llu\n", resync, max_sectors);
4399 }
4400
4401 static struct md_sysfs_entry md_sync_completed = __ATTR_RO(sync_completed);
4402
4403 static ssize_t
4404 min_sync_show(struct mddev *mddev, char *page)
4405 {
4406         return sprintf(page, "%llu\n",
4407                        (unsigned long long)mddev->resync_min);
4408 }
4409 static ssize_t
4410 min_sync_store(struct mddev *mddev, const char *buf, size_t len)
4411 {
4412         unsigned long long min;
4413         if (kstrtoull(buf, 10, &min))
4414                 return -EINVAL;
4415         if (min > mddev->resync_max)
4416                 return -EINVAL;
4417         if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
4418                 return -EBUSY;
4419
4420         /* Must be a multiple of chunk_size */
4421         if (mddev->chunk_sectors) {
4422                 sector_t temp = min;
4423                 if (sector_div(temp, mddev->chunk_sectors))
4424                         return -EINVAL;
4425         }
4426         mddev->resync_min = min;
4427
4428         return len;
4429 }
4430
4431 static struct md_sysfs_entry md_min_sync =
4432 __ATTR(sync_min, S_IRUGO|S_IWUSR, min_sync_show, min_sync_store);
4433
4434 static ssize_t
4435 max_sync_show(struct mddev *mddev, char *page)
4436 {
4437         if (mddev->resync_max == MaxSector)
4438                 return sprintf(page, "max\n");
4439         else
4440                 return sprintf(page, "%llu\n",
4441                                (unsigned long long)mddev->resync_max);
4442 }
4443 static ssize_t
4444 max_sync_store(struct mddev *mddev, const char *buf, size_t len)
4445 {
4446         if (strncmp(buf, "max", 3) == 0)
4447                 mddev->resync_max = MaxSector;
4448         else {
4449                 unsigned long long max;
4450                 if (kstrtoull(buf, 10, &max))
4451                         return -EINVAL;
4452                 if (max < mddev->resync_min)
4453                         return -EINVAL;
4454                 if (max < mddev->resync_max &&
4455                     mddev->ro == 0 &&
4456                     test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
4457                         return -EBUSY;
4458
4459                 /* Must be a multiple of chunk_size */
4460                 if (mddev->chunk_sectors) {
4461                         sector_t temp = max;
4462                         if (sector_div(temp, mddev->chunk_sectors))
4463                                 return -EINVAL;
4464                 }
4465                 mddev->resync_max = max;
4466         }
4467         wake_up(&mddev->recovery_wait);
4468         return len;
4469 }
4470
4471 static struct md_sysfs_entry md_max_sync =
4472 __ATTR(sync_max, S_IRUGO|S_IWUSR, max_sync_show, max_sync_store);
4473
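     /* suspend_lo/suspend_hi bound a sector range in which incoming I/O
      * is held off.  The stores below lean on the personality's quiesce()
      * hook: growing the region must wait for in-flight requests
      * (quiesce 1 then 0), while shrinking it only needs a kick
      * (quiesce 2).
      */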
4474 static ssize_t
4475 suspend_lo_show(struct mddev *mddev, char *page)
4476 {
4477         return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_lo);
4478 }
4479
4480 static ssize_t
4481 suspend_lo_store(struct mddev *mddev, const char *buf, size_t len)
4482 {
4483         char *e;
4484         unsigned long long new = simple_strtoull(buf, &e, 10);
4485         unsigned long long old = mddev->suspend_lo;
4486
4487         if (mddev->pers == NULL ||
4488             mddev->pers->quiesce == NULL)
4489                 return -EINVAL;
4490         if (buf == e || (*e && *e != '\n'))
4491                 return -EINVAL;
4492
4493         mddev->suspend_lo = new;
4494         if (new >= old)
4495                 /* Shrinking suspended region */
4496                 mddev->pers->quiesce(mddev, 2);
4497         else {
4498                 /* Expanding suspended region - need to wait */
4499                 mddev->pers->quiesce(mddev, 1);
4500                 mddev->pers->quiesce(mddev, 0);
4501         }
4502         return len;
4503 }
4504 static struct md_sysfs_entry md_suspend_lo =
4505 __ATTR(suspend_lo, S_IRUGO|S_IWUSR, suspend_lo_show, suspend_lo_store);
4506
4507
4508 static ssize_t
4509 suspend_hi_show(struct mddev *mddev, char *page)
4510 {
4511         return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_hi);
4512 }
4513
4514 static ssize_t
4515 suspend_hi_store(struct mddev *mddev, const char *buf, size_t len)
4516 {
4517         char *e;
4518         unsigned long long new = simple_strtoull(buf, &e, 10);
4519         unsigned long long old = mddev->suspend_hi;
4520
4521         if (mddev->pers == NULL ||
4522             mddev->pers->quiesce == NULL)
4523                 return -EINVAL;
4524         if (buf == e || (*e && *e != '\n'))
4525                 return -EINVAL;
4526
4527         mddev->suspend_hi = new;
4528         if (new <= old)
4529                 /* Shrinking suspended region */
4530                 mddev->pers->quiesce(mddev, 2);
4531         else {
4532                 /* Expanding suspended region - need to wait */
4533                 mddev->pers->quiesce(mddev, 1);
4534                 mddev->pers->quiesce(mddev, 0);
4535         }
4536         return len;
4537 }
4538 static struct md_sysfs_entry md_suspend_hi =
4539 __ATTR(suspend_hi, S_IRUGO|S_IWUSR, suspend_hi_show, suspend_hi_store);
4540
4541 static ssize_t
4542 reshape_position_show(struct mddev *mddev, char *page)
4543 {
4544         if (mddev->reshape_position != MaxSector)
4545                 return sprintf(page, "%llu\n",
4546                                (unsigned long long)mddev->reshape_position);
4547         strcpy(page, "none\n");
4548         return 5;
4549 }
4550
4551 static ssize_t
4552 reshape_position_store(struct mddev *mddev, const char *buf, size_t len)
4553 {
4554         struct md_rdev *rdev;
4555         char *e;
4556         unsigned long long new = simple_strtoull(buf, &e, 10);
4557         if (mddev->pers)
4558                 return -EBUSY;
4559         if (buf == e || (*e && *e != '\n'))
4560                 return -EINVAL;
4561         mddev->reshape_position = new;
4562         mddev->delta_disks = 0;
4563         mddev->reshape_backwards = 0;
4564         mddev->new_level = mddev->level;
4565         mddev->new_layout = mddev->layout;
4566         mddev->new_chunk_sectors = mddev->chunk_sectors;
4567         rdev_for_each(rdev, mddev)
4568                 rdev->new_data_offset = rdev->data_offset;
4569         return len;
4570 }
4571
4572 static struct md_sysfs_entry md_reshape_position =
4573 __ATTR(reshape_position, S_IRUGO|S_IWUSR, reshape_position_show,
4574        reshape_position_store);
4575
4576 static ssize_t
4577 reshape_direction_show(struct mddev *mddev, char *page)
4578 {
4579         return sprintf(page, "%s\n",
4580                        mddev->reshape_backwards ? "backwards" : "forwards");
4581 }
4582
4583 static ssize_t
4584 reshape_direction_store(struct mddev *mddev, const char *buf, size_t len)
4585 {
4586         int backwards = 0;
4587         if (cmd_match(buf, "forwards"))
4588                 backwards = 0;
4589         else if (cmd_match(buf, "backwards"))
4590                 backwards = 1;
4591         else
4592                 return -EINVAL;
4593         if (mddev->reshape_backwards == backwards)
4594                 return len;
4595
4596         /* check if we are allowed to change */
4597         if (mddev->delta_disks)
4598                 return -EBUSY;
4599
4600         if (mddev->persistent &&
4601             mddev->major_version == 0)
4602                 return -EINVAL;
4603
4604         mddev->reshape_backwards = backwards;
4605         return len;
4606 }
4607
4608 static struct md_sysfs_entry md_reshape_direction =
4609 __ATTR(reshape_direction, S_IRUGO|S_IWUSR, reshape_direction_show,
4610        reshape_direction_store);
4611
4612 static ssize_t
4613 array_size_show(struct mddev *mddev, char *page)
4614 {
4615         if (mddev->external_size)
4616                 return sprintf(page, "%llu\n",
4617                                (unsigned long long)mddev->array_sectors/2);
4618         else
4619                 return sprintf(page, "default\n");
4620 }
4621
4622 static ssize_t
4623 array_size_store(struct mddev *mddev, const char *buf, size_t len)
4624 {
4625         sector_t sectors;
4626
4627         if (strncmp(buf, "default", 7) == 0) {
4628                 if (mddev->pers)
4629                         sectors = mddev->pers->size(mddev, 0, 0);
4630                 else
4631                         sectors = mddev->array_sectors;
4632
4633                 mddev->external_size = 0;
4634         } else {
4635                 if (strict_blocks_to_sectors(buf, &sectors) < 0)
4636                         return -EINVAL;
4637                 if (mddev->pers && mddev->pers->size(mddev, 0, 0) < sectors)
4638                         return -E2BIG;
4639
4640                 mddev->external_size = 1;
4641         }
4642
4643         mddev->array_sectors = sectors;
4644         if (mddev->pers) {
4645                 set_capacity(mddev->gendisk, mddev->array_sectors);
4646                 revalidate_disk(mddev->gendisk);
4647         }
4648         return len;
4649 }
4650
4651 static struct md_sysfs_entry md_array_size =
4652 __ATTR(array_size, S_IRUGO|S_IWUSR, array_size_show,
4653        array_size_store);
4654
4655 static struct attribute *md_default_attrs[] = {
4656         &md_level.attr,
4657         &md_layout.attr,
4658         &md_raid_disks.attr,
4659         &md_chunk_size.attr,
4660         &md_size.attr,
4661         &md_resync_start.attr,
4662         &md_metadata.attr,
4663         &md_new_device.attr,
4664         &md_safe_delay.attr,
4665         &md_array_state.attr,
4666         &md_reshape_position.attr,
4667         &md_reshape_direction.attr,
4668         &md_array_size.attr,
4669         &max_corr_read_errors.attr,
4670         NULL,
4671 };
4672
4673 static struct attribute *md_redundancy_attrs[] = {
4674         &md_scan_mode.attr,
4675         &md_last_scan_mode.attr,
4676         &md_mismatches.attr,
4677         &md_sync_min.attr,
4678         &md_sync_max.attr,
4679         &md_sync_speed.attr,
4680         &md_sync_force_parallel.attr,
4681         &md_sync_completed.attr,
4682         &md_min_sync.attr,
4683         &md_max_sync.attr,
4684         &md_suspend_lo.attr,
4685         &md_suspend_hi.attr,
4686         &md_bitmap.attr,
4687         &md_degraded.attr,
4688         NULL,
4689 };
4690 static struct attribute_group md_redundancy_group = {
4691         .name = NULL,
4692         .attrs = md_redundancy_attrs,
4693 };
4694
4695
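     /* show/store wrappers for every md sysfs attribute: pin the mddev
      * (bailing out if it has already been removed from all_mddevs) and
      * call the handler under mddev_lock().
      */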
4696 static ssize_t
4697 md_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
4698 {
4699         struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr);
4700         struct mddev *mddev = container_of(kobj, struct mddev, kobj);
4701         ssize_t rv;
4702
4703         if (!entry->show)
4704                 return -EIO;
4705         spin_lock(&all_mddevs_lock);
4706         if (list_empty(&mddev->all_mddevs)) {
4707                 spin_unlock(&all_mddevs_lock);
4708                 return -EBUSY;
4709         }
4710         mddev_get(mddev);
4711         spin_unlock(&all_mddevs_lock);
4712
4713         rv = mddev_lock(mddev);
4714         if (!rv) {
4715                 rv = entry->show(mddev, page);
4716                 mddev_unlock(mddev);
4717         }
4718         mddev_put(mddev);
4719         return rv;
4720 }
4721
4722 static ssize_t
4723 md_attr_store(struct kobject *kobj, struct attribute *attr,
4724               const char *page, size_t length)
4725 {
4726         struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr);
4727         struct mddev *mddev = container_of(kobj, struct mddev, kobj);
4728         ssize_t rv;
4729
4730         if (!entry->store)
4731                 return -EIO;
4732         if (!capable(CAP_SYS_ADMIN))
4733                 return -EACCES;
4734         spin_lock(&all_mddevs_lock);
4735         if (list_empty(&mddev->all_mddevs)) {
4736                 spin_unlock(&all_mddevs_lock);
4737                 return -EBUSY;
4738         }
4739         mddev_get(mddev);
4740         spin_unlock(&all_mddevs_lock);
4741         if (entry->store == new_dev_store)
4742                 flush_workqueue(md_misc_wq);
4743         rv = mddev_lock(mddev);
4744         if (!rv) {
4745                 rv = entry->store(mddev, page, length);
4746                 mddev_unlock(mddev);
4747         }
4748         mddev_put(mddev);
4749         return rv;
4750 }
4751
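     /* kobject release method - runs once the last reference to the
      * mddev's kobject is dropped, so the disk, queue and mddev itself
      * can safely be torn down here.
      */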
4752 static void md_free(struct kobject *ko)
4753 {
4754         struct mddev *mddev = container_of(ko, struct mddev, kobj);
4755
4756         if (mddev->sysfs_state)
4757                 sysfs_put(mddev->sysfs_state);
4758
4759         if (mddev->gendisk) {
4760                 del_gendisk(mddev->gendisk);
4761                 put_disk(mddev->gendisk);
4762         }
4763         if (mddev->queue)
4764                 blk_cleanup_queue(mddev->queue);
4765
4766         kfree(mddev);
4767 }
4768
4769 static const struct sysfs_ops md_sysfs_ops = {
4770         .show   = md_attr_show,
4771         .store  = md_attr_store,
4772 };
4773 static struct kobj_type md_ktype = {
4774         .release        = md_free,
4775         .sysfs_ops      = &md_sysfs_ops,
4776         .default_attrs  = md_default_attrs,
4777 };
4778
4779 int mdp_major = 0;
4780
4781 static void mddev_delayed_delete(struct work_struct *ws)
4782 {
4783         struct mddev *mddev = container_of(ws, struct mddev, del_work);
4784
4785         sysfs_remove_group(&mddev->kobj, &md_bitmap_group);
4786         kobject_del(&mddev->kobj);
4787         kobject_put(&mddev->kobj);
4788 }
4789
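     /* Allocate the request queue, gendisk and sysfs kobject for one md
      * device.  'name' is non-NULL only for named "md_*" arrays created
      * through add_named_array() below.
      */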
4790 static int md_alloc(dev_t dev, char *name)
4791 {
4792         static DEFINE_MUTEX(disks_mutex);
4793         struct mddev *mddev = mddev_find(dev);
4794         struct gendisk *disk;
4795         int partitioned;
4796         int shift;
4797         int unit;
4798         int error;
4799
4800         if (!mddev)
4801                 return -ENODEV;
4802
4803         partitioned = (MAJOR(mddev->unit) != MD_MAJOR);
4804         shift = partitioned ? MdpMinorShift : 0;
4805         unit = MINOR(mddev->unit) >> shift;
4806
4807         /* wait for any previous instance of this device to be
4808          * completely removed (mddev_delayed_delete).
4809          */
4810         flush_workqueue(md_misc_wq);
4811
4812         mutex_lock(&disks_mutex);
4813         error = -EEXIST;
4814         if (mddev->gendisk)
4815                 goto abort;
4816
4817         if (name) {
4818                 /* Need to ensure that 'name' is not a duplicate.
4819                  */
4820                 struct mddev *mddev2;
4821                 spin_lock(&all_mddevs_lock);
4822
4823                 list_for_each_entry(mddev2, &all_mddevs, all_mddevs)
4824                         if (mddev2->gendisk &&
4825                             strcmp(mddev2->gendisk->disk_name, name) == 0) {
4826                                 spin_unlock(&all_mddevs_lock);
4827                                 goto abort;
4828                         }
4829                 spin_unlock(&all_mddevs_lock);
4830         }
4831
4832         error = -ENOMEM;
4833         mddev->queue = blk_alloc_queue(GFP_KERNEL);
4834         if (!mddev->queue)
4835                 goto abort;
4836         mddev->queue->queuedata = mddev;
4837
4838         blk_queue_make_request(mddev->queue, md_make_request);
4839         blk_set_stacking_limits(&mddev->queue->limits);
4840
4841         disk = alloc_disk(1 << shift);
4842         if (!disk) {
4843                 blk_cleanup_queue(mddev->queue);
4844                 mddev->queue = NULL;
4845                 goto abort;
4846         }
4847         disk->major = MAJOR(mddev->unit);
4848         disk->first_minor = unit << shift;
4849         if (name)
4850                 strcpy(disk->disk_name, name);
4851         else if (partitioned)
4852                 sprintf(disk->disk_name, "md_d%d", unit);
4853         else
4854                 sprintf(disk->disk_name, "md%d", unit);
4855         disk->fops = &md_fops;
4856         disk->private_data = mddev;
4857         disk->queue = mddev->queue;
4858         blk_queue_flush(mddev->queue, REQ_FLUSH | REQ_FUA);
4859         /* Allow extended partitions.  This makes the
4860          * 'mdp' device redundant, but we can't really
4861          * remove it now.
4862          */
4863         disk->flags |= GENHD_FL_EXT_DEVT;
4864         mddev->gendisk = disk;
4865         /* As soon as we call add_disk(), another thread could get
4866          * through to md_open, so make sure it doesn't get too far
4867          */
4868         mutex_lock(&mddev->open_mutex);
4869         add_disk(disk);
4870
4871         error = kobject_init_and_add(&mddev->kobj, &md_ktype,
4872                                      &disk_to_dev(disk)->kobj, "%s", "md");
4873         if (error) {
4874                 /* This isn't possible, but as kobject_init_and_add is marked
4875                  * __must_check, we must do something with the result
4876                  */
4877                 printk(KERN_WARNING "md: cannot register %s/md - name in use\n",
4878                        disk->disk_name);
4879                 error = 0;
4880         }
4881         if (mddev->kobj.sd &&
4882             sysfs_create_group(&mddev->kobj, &md_bitmap_group))
4883                 printk(KERN_DEBUG "pointless warning\n");
4884         mutex_unlock(&mddev->open_mutex);
4885  abort:
4886         mutex_unlock(&disks_mutex);
4887         if (!error && mddev->kobj.sd) {
4888                 kobject_uevent(&mddev->kobj, KOBJ_ADD);
4889                 mddev->sysfs_state = sysfs_get_dirent_safe(mddev->kobj.sd, "array_state");
4890         }
4891         mddev_put(mddev);
4892         return error;
4893 }
4894
4895 static struct kobject *md_probe(dev_t dev, int *part, void *data)
4896 {
4897         md_alloc(dev, NULL);
4898         return NULL;
4899 }
4900
4901 static int add_named_array(const char *val, struct kernel_param *kp)
4902 {
4903         /* val must be "md_*" where * is not all digits.
4904          * We allocate an array with a large free minor number, and
4905          * set the name to val.  val must not already be an active name.
4906          */
4907         int len = strlen(val);
4908         char buf[DISK_NAME_LEN];
4909
4910         while (len && val[len-1] == '\n')
4911                 len--;
4912         if (len >= DISK_NAME_LEN)
4913                 return -E2BIG;
4914         strlcpy(buf, val, len+1);
4915         if (strncmp(buf, "md_", 3) != 0)
4916                 return -EINVAL;
4917         return md_alloc(0, buf);
4918 }
4919
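     /* Safemode timer: fires once safemode_delay has passed with no
      * writes pending, marking the array as possibly clean and poking
      * the md thread so the superblock and sysfs state get updated.
      */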
4920 static void md_safemode_timeout(unsigned long data)
4921 {
4922         struct mddev *mddev = (struct mddev *) data;
4923
4924         if (!atomic_read(&mddev->writes_pending)) {
4925                 mddev->safemode = 1;
4926                 if (mddev->external)
4927                         sysfs_notify_dirent_safe(mddev->sysfs_state);
4928         }
4929         md_wakeup_thread(mddev->thread);
4930 }
4931
4932 static int start_dirty_degraded;
4933
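     /* md_run(): bring an assembled array on line - sanity-check the
      * member devices, bind a personality (loading its module if
      * necessary), optionally create the bitmap, and announce the array.
      * Exported so that external users (e.g. dm-raid) can drive it
      * directly.
      */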
4934 int md_run(struct mddev *mddev)
4935 {
4936         int err;
4937         struct md_rdev *rdev;
4938         struct md_personality *pers;
4939
4940         if (list_empty(&mddev->disks))
4941                 /* cannot run an array with no devices... */
4942                 return -EINVAL;
4943
4944         if (mddev->pers)
4945                 return -EBUSY;
4946         /* Cannot run until previous stop completes properly */
4947         if (mddev->sysfs_active)
4948                 return -EBUSY;
4949
4950         /*
4951          * Analyze all RAID superblock(s)
4952          */
4953         if (!mddev->raid_disks) {
4954                 if (!mddev->persistent)
4955                         return -EINVAL;
4956                 analyze_sbs(mddev);
4957         }
4958
4959         if (mddev->level != LEVEL_NONE)
4960                 request_module("md-level-%d", mddev->level);
4961         else if (mddev->clevel[0])
4962                 request_module("md-%s", mddev->clevel);
4963
4964         /*
4965          * Drop all container device buffers, from now on
4966          * the only valid external interface is through the md
4967          * device.
4968          */
4969         rdev_for_each(rdev, mddev) {
4970                 if (test_bit(Faulty, &rdev->flags))
4971                         continue;
4972                 sync_blockdev(rdev->bdev);
4973                 invalidate_bdev(rdev->bdev);
4974
4975                 /* perform some consistency tests on the device.
4976                  * We don't want the data to overlap the metadata;
4977                  * internal bitmap issues have been handled elsewhere.
4978                  */
4979                 if (rdev->meta_bdev) {
4980                         /* Nothing to check */;
4981                 } else if (rdev->data_offset < rdev->sb_start) {
4982                         if (mddev->dev_sectors &&
4983                             rdev->data_offset + mddev->dev_sectors
4984                             > rdev->sb_start) {
4985                                 printk("md: %s: data overlaps metadata\n",
4986                                        mdname(mddev));
4987                                 return -EINVAL;
4988                         }
4989                 } else {
4990                         if (rdev->sb_start + rdev->sb_size/512
4991                             > rdev->data_offset) {
4992                                 printk("md: %s: metadata overlaps data\n",
4993                                        mdname(mddev));
4994                                 return -EINVAL;
4995                         }
4996                 }
4997                 sysfs_notify_dirent_safe(rdev->sysfs_state);
4998         }
4999
5000         if (mddev->bio_set == NULL)
5001                 mddev->bio_set = bioset_create(BIO_POOL_SIZE, 0);
5002
5003         spin_lock(&pers_lock);
5004         pers = find_pers(mddev->level, mddev->clevel);
5005         if (!pers || !try_module_get(pers->owner)) {
5006                 spin_unlock(&pers_lock);
5007                 if (mddev->level != LEVEL_NONE)
5008                         printk(KERN_WARNING "md: personality for level %d is not loaded!\n",
5009                                mddev->level);
5010                 else
5011                         printk(KERN_WARNING "md: personality for level %s is not loaded!\n",
5012                                mddev->clevel);
5013                 return -EINVAL;
5014         }
5015         mddev->pers = pers;
5016         spin_unlock(&pers_lock);
5017         if (mddev->level != pers->level) {
5018                 mddev->level = pers->level;
5019                 mddev->new_level = pers->level;
5020         }
5021         strlcpy(mddev->clevel, pers->name, sizeof(mddev->clevel));
5022
5023         if (mddev->reshape_position != MaxSector &&
5024             pers->start_reshape == NULL) {
5025                 /* This personality cannot handle reshaping... */
5026                 mddev->pers = NULL;
5027                 module_put(pers->owner);
5028                 return -EINVAL;
5029         }
5030
5031         if (pers->sync_request) {
5032                 /* Warn if this is a potentially silly
5033                  * configuration.
5034                  */
5035                 char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
5036                 struct md_rdev *rdev2;
5037                 int warned = 0;
5038
5039                 rdev_for_each(rdev, mddev)
5040                         rdev_for_each(rdev2, mddev) {
5041                                 if (rdev < rdev2 &&
5042                                     rdev->bdev->bd_contains ==
5043                                     rdev2->bdev->bd_contains) {
5044                                         printk(KERN_WARNING
5045                                                "%s: WARNING: %s appears to be"
5046                                                " on the same physical disk as"
5047                                                " %s.\n",
5048                                                mdname(mddev),
5049                                                bdevname(rdev->bdev,b),
5050                                                bdevname(rdev2->bdev,b2));
5051                                         warned = 1;
5052                                 }
5053                         }
5054
5055                 if (warned)
5056                         printk(KERN_WARNING
5057                                "True protection against single-disk"
5058                                " failure might be compromised.\n");
5059         }
5060
5061         mddev->recovery = 0;
5062         /* may be overridden by the personality */
5063         mddev->resync_max_sectors = mddev->dev_sectors;
5064
5065         mddev->ok_start_degraded = start_dirty_degraded;
5066
5067         if (start_readonly && mddev->ro == 0)
5068                 mddev->ro = 2; /* read-only, but switch on first write */
5069
5070         err = mddev->pers->run(mddev);
5071         if (err)
5072                 printk(KERN_ERR "md: pers->run() failed ...\n");
5073         else if (mddev->pers->size(mddev, 0, 0) < mddev->array_sectors) {
5074                 WARN_ONCE(!mddev->external_size, "%s: default size too small,"
5075                           " but 'external_size' not in effect?\n", __func__);
5076                 printk(KERN_ERR
5077                        "md: invalid array_size %llu > default size %llu\n",
5078                        (unsigned long long)mddev->array_sectors / 2,
5079                        (unsigned long long)mddev->pers->size(mddev, 0, 0) / 2);
5080                 err = -EINVAL;
5081                 mddev->pers->stop(mddev);
5082         }
5083         if (err == 0 && mddev->pers->sync_request &&
5084             (mddev->bitmap_info.file || mddev->bitmap_info.offset)) {
5085                 err = bitmap_create(mddev);
5086                 if (err) {
5087                         printk(KERN_ERR "%s: failed to create bitmap (%d)\n",
5088                                mdname(mddev), err);
5089                         mddev->pers->stop(mddev);
5090                 }
5091         }
5092         if (err) {
5093                 module_put(mddev->pers->owner);
5094                 mddev->pers = NULL;
5095                 bitmap_destroy(mddev);
5096                 return err;
5097         }
5098         if (mddev->pers->sync_request) {
5099                 if (mddev->kobj.sd &&
5100                     sysfs_create_group(&mddev->kobj, &md_redundancy_group))
5101                         printk(KERN_WARNING
5102                                "md: cannot register extra attributes for %s\n",
5103                                mdname(mddev));
5104                 mddev->sysfs_action = sysfs_get_dirent_safe(mddev->kobj.sd, "sync_action");
5105         } else if (mddev->ro == 2) /* auto-readonly not meaningful */
5106                 mddev->ro = 0;
5107
5108         atomic_set(&mddev->writes_pending,0);
5109         atomic_set(&mddev->max_corr_read_errors,
5110                    MD_DEFAULT_MAX_CORRECTED_READ_ERRORS);
5111         mddev->safemode = 0;
5112         mddev->safemode_timer.function = md_safemode_timeout;
5113         mddev->safemode_timer.data = (unsigned long) mddev;
5114         mddev->safemode_delay = (200 * HZ)/1000 +1; /* 200 msec delay */
5115         mddev->in_sync = 1;
5116         smp_wmb();
5117         mddev->ready = 1;
5118         rdev_for_each(rdev, mddev)
5119                 if (rdev->raid_disk >= 0)
5120                         if (sysfs_link_rdev(mddev, rdev))
5121                                 /* failure here is OK */;
5122
5123         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
5124
5125         if (mddev->flags & MD_UPDATE_SB_FLAGS)
5126                 md_update_sb(mddev, 0);
5127
5128         md_new_event(mddev);
5129         sysfs_notify_dirent_safe(mddev->sysfs_state);
5130         sysfs_notify_dirent_safe(mddev->sysfs_action);
5131         sysfs_notify(&mddev->kobj, NULL, "degraded");
5132         return 0;
5133 }
5134 EXPORT_SYMBOL_GPL(md_run);
5135
5136 static int do_md_run(struct mddev *mddev)
5137 {
5138         int err;
5139
5140         err = md_run(mddev);
5141         if (err)
5142                 goto out;
5143         err = bitmap_load(mddev);
5144         if (err) {
5145                 bitmap_destroy(mddev);
5146                 goto out;
5147         }
5148
5149         md_wakeup_thread(mddev->thread);
5150         md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */
5151
5152         set_capacity(mddev->gendisk, mddev->array_sectors);
5153         revalidate_disk(mddev->gendisk);
5154         mddev->changed = 1;
5155         kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE);
5156 out:
5157         return err;
5158 }
5159
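     /* Switch a read-only (ro == 1) array back to read-write mode. */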
5160 static int restart_array(struct mddev *mddev)
5161 {
5162         struct gendisk *disk = mddev->gendisk;
5163
5164         /* Complain if it has no devices */
5165         if (list_empty(&mddev->disks))
5166                 return -ENXIO;
5167         if (!mddev->pers)
5168                 return -EINVAL;
5169         if (!mddev->ro)
5170                 return -EBUSY;
5171         mddev->safemode = 0;
5172         mddev->ro = 0;
5173         set_disk_ro(disk, 0);
5174         printk(KERN_INFO "md: %s switched to read-write mode.\n",
5175                 mdname(mddev));
5176         /* Kick recovery or resync if necessary */
5177         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
5178         md_wakeup_thread(mddev->thread);
5179         md_wakeup_thread(mddev->sync_thread);
5180         sysfs_notify_dirent_safe(mddev->sysfs_state);
5181         return 0;
5182 }
5183
5184 /* similar to deny_write_access, but accounts for our holding a reference
5185  * to the file ourselves */
5186 static int deny_bitmap_write_access(struct file * file)
5187 {
5188         struct inode *inode = file->f_mapping->host;
5189
5190         spin_lock(&inode->i_lock);
5191         if (atomic_read(&inode->i_writecount) > 1) {
5192                 spin_unlock(&inode->i_lock);
5193                 return -ETXTBSY;
5194         }
5195         atomic_set(&inode->i_writecount, -1);
5196         spin_unlock(&inode->i_lock);
5197
5198         return 0;
5199 }
5200
5201 void restore_bitmap_write_access(struct file *file)
5202 {
5203         struct inode *inode = file->f_mapping->host;
5204
5205         spin_lock(&inode->i_lock);
5206         atomic_set(&inode->i_writecount, 1);
5207         spin_unlock(&inode->i_lock);
5208 }
5209
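     /* Reset every mddev field to its initial value so the device can
      * be reused; used when an array is completely stopped.
      */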
5210 static void md_clean(struct mddev *mddev)
5211 {
5212         mddev->array_sectors = 0;
5213         mddev->external_size = 0;
5214         mddev->dev_sectors = 0;
5215         mddev->raid_disks = 0;
5216         mddev->recovery_cp = 0;
5217         mddev->resync_min = 0;
5218         mddev->resync_max = MaxSector;
5219         mddev->reshape_position = MaxSector;
5220         mddev->external = 0;
5221         mddev->persistent = 0;
5222         mddev->level = LEVEL_NONE;
5223         mddev->clevel[0] = 0;
5224         mddev->flags = 0;
5225         mddev->ro = 0;
5226         mddev->metadata_type[0] = 0;
5227         mddev->chunk_sectors = 0;
5228         mddev->ctime = mddev->utime = 0;
5229         mddev->layout = 0;
5230         mddev->max_disks = 0;
5231         mddev->events = 0;
5232         mddev->can_decrease_events = 0;
5233         mddev->delta_disks = 0;
5234         mddev->reshape_backwards = 0;
5235         mddev->new_level = LEVEL_NONE;
5236         mddev->new_layout = 0;
5237         mddev->new_chunk_sectors = 0;
5238         mddev->curr_resync = 0;
5239         atomic64_set(&mddev->resync_mismatches, 0);
5240         mddev->suspend_lo = mddev->suspend_hi = 0;
5241         mddev->sync_speed_min = mddev->sync_speed_max = 0;
5242         mddev->recovery = 0;
5243         mddev->in_sync = 0;
5244         mddev->changed = 0;
5245         mddev->degraded = 0;
5246         mddev->safemode = 0;
5247         mddev->merge_check_needed = 0;
5248         mddev->bitmap_info.offset = 0;
5249         mddev->bitmap_info.default_offset = 0;
5250         mddev->bitmap_info.default_space = 0;
5251         mddev->bitmap_info.chunksize = 0;
5252         mddev->bitmap_info.daemon_sleep = 0;
5253         mddev->bitmap_info.max_write_behind = 0;
5254 }
5255
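     /* Quiesce all writers: freeze recovery, reap any sync thread,
      * flush the bitmap, and mark the array clean if it was read-write.
      */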
5256 static void __md_stop_writes(struct mddev *mddev)
5257 {
5258         set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
5259         if (mddev->sync_thread) {
5260                 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
5261                 md_reap_sync_thread(mddev);
5262         }
5263
5264         del_timer_sync(&mddev->safemode_timer);
5265
5266         bitmap_flush(mddev);
5267         md_super_wait(mddev);
5268
5269         if (mddev->ro == 0 &&
5270             (!mddev->in_sync || (mddev->flags & MD_UPDATE_SB_FLAGS))) {
5271                 /* mark array as shutdown cleanly */
5272                 mddev->in_sync = 1;
5273                 md_update_sb(mddev, 1);
5274         }
5275 }
5276
5277 void md_stop_writes(struct mddev *mddev)
5278 {
5279         mddev_lock_nointr(mddev);
5280         __md_stop_writes(mddev);
5281         mddev_unlock(mddev);
5282 }
5283 EXPORT_SYMBOL_GPL(md_stop_writes);
5284
5285 static void __md_stop(struct mddev *mddev)
5286 {
5287         mddev->ready = 0;
5288         mddev->pers->stop(mddev);
5289         if (mddev->pers->sync_request && mddev->to_remove == NULL)
5290                 mddev->to_remove = &md_redundancy_group;
5291         module_put(mddev->pers->owner);
5292         mddev->pers = NULL;
5293         clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
5294 }
5295
5296 void md_stop(struct mddev *mddev)
5297 {
5298         /* stop the array and free any attached data structures.
5299          * This is called from dm-raid.
5300          */
5301         __md_stop(mddev);
5302         bitmap_destroy(mddev);
5303         if (mddev->bio_set)
5304                 bioset_free(mddev->bio_set);
5305 }
5306
5307 EXPORT_SYMBOL_GPL(md_stop);
5308
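     /* Switch a running array to read-only (ro == 1).  Fails with
      * -EBUSY while other openers exist, and with -ENXIO if it is
      * already read-only.
      */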
5309 static int md_set_readonly(struct mddev *mddev, struct block_device *bdev)
5310 {
5311         int err = 0;
5312         int did_freeze = 0;
5313
5314         if (!test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) {
5315                 did_freeze = 1;
5316                 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
5317                 md_wakeup_thread(mddev->thread);
5318         }
5319         if (mddev->sync_thread) {
5320                 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
5321                 /* Thread might be blocked waiting for metadata update
5322                  * which will now never happen */
5323                 wake_up_process(mddev->sync_thread->tsk);
5324         }
5325         mddev_unlock(mddev);
5326         wait_event(resync_wait, mddev->sync_thread == NULL);
5327         mddev_lock_nointr(mddev);
5328
5329         mutex_lock(&mddev->open_mutex);
5330         if (atomic_read(&mddev->openers) > !!bdev ||
5331             mddev->sync_thread ||
5332             (bdev && !test_bit(MD_STILL_CLOSED, &mddev->flags))) {
5333                 printk("md: %s still in use.\n",mdname(mddev));
5334                 if (did_freeze) {
5335                         clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
5336                         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
5337                         md_wakeup_thread(mddev->thread);
5338                 }
5339                 err = -EBUSY;
5340                 goto out;
5341         }
5342         if (mddev->pers) {
5343                 __md_stop_writes(mddev);
5344
5345                 err = -ENXIO;
5346                 if (mddev->ro == 1)
5347                         goto out;
5348                 mddev->ro = 1;
5349                 set_disk_ro(mddev->gendisk, 1);
5350                 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
5351                 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
5352                 md_wakeup_thread(mddev->thread);
5353                 sysfs_notify_dirent_safe(mddev->sysfs_state);
5354                 err = 0;
5355         }
5356 out:
5357         mutex_unlock(&mddev->open_mutex);
5358         return err;
5359 }
5360
5361 /* mode:
5362  *   0 - completely stop and disassemble array
5363  *   2 - stop but do not disassemble array
5364  */
5365 static int do_md_stop(struct mddev * mddev, int mode,
5366                       struct block_device *bdev)
5367 {
5368         struct gendisk *disk = mddev->gendisk;
5369         struct md_rdev *rdev;
5370         int did_freeze = 0;
5371
5372         if (!test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) {
5373                 did_freeze = 1;
5374                 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
5375                 md_wakeup_thread(mddev->thread);
5376         }
5377         if (mddev->sync_thread) {
5378                 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
5379                 /* Thread might be blocked waiting for metadata update
5380                  * which will now never happen */
5381                 wake_up_process(mddev->sync_thread->tsk);
5382         }
5383         mddev_unlock(mddev);
5384         wait_event(resync_wait, mddev->sync_thread == NULL);
5385         mddev_lock_nointr(mddev);
5386
5387         mutex_lock(&mddev->open_mutex);
5388         if (atomic_read(&mddev->openers) > !!bdev ||
5389             mddev->sysfs_active ||
5390             mddev->sync_thread ||
5391             (bdev && !test_bit(MD_STILL_CLOSED, &mddev->flags))) {
5392                 printk("md: %s still in use.\n",mdname(mddev));
5393                 mutex_unlock(&mddev->open_mutex);
5394                 if (did_freeze) {
5395                         clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
5396                         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
5397                         md_wakeup_thread(mddev->thread);
5398                 }
5399                 return -EBUSY;
5400         }
5401         if (mddev->pers) {
5402                 if (mddev->ro)
5403                         set_disk_ro(disk, 0);
5404
5405                 __md_stop_writes(mddev);
5406                 __md_stop(mddev);
5407                 mddev->queue->merge_bvec_fn = NULL;
5408                 mddev->queue->backing_dev_info.congested_fn = NULL;
5409
5410                 /* tell userspace to handle 'inactive' */
5411                 sysfs_notify_dirent_safe(mddev->sysfs_state);
5412
5413                 rdev_for_each(rdev, mddev)
5414                         if (rdev->raid_disk >= 0)
5415                                 sysfs_unlink_rdev(mddev, rdev);
5416
5417                 set_capacity(disk, 0);
5418                 mutex_unlock(&mddev->open_mutex);
5419                 mddev->changed = 1;
5420                 revalidate_disk(disk);
5421
5422                 if (mddev->ro)
5423                         mddev->ro = 0;
5424         } else
5425                 mutex_unlock(&mddev->open_mutex);
5426         /*
5427          * Free resources if final stop
5428          */
5429         if (mode == 0) {
5430                 printk(KERN_INFO "md: %s stopped.\n", mdname(mddev));
5431
5432                 bitmap_destroy(mddev);
5433                 if (mddev->bitmap_info.file) {
5434                         restore_bitmap_write_access(mddev->bitmap_info.file);
5435                         fput(mddev->bitmap_info.file);
5436                         mddev->bitmap_info.file = NULL;
5437                 }
5438                 mddev->bitmap_info.offset = 0;
5439
5440                 export_array(mddev);
5441
5442                 md_clean(mddev);
5443                 kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE);
5444                 if (mddev->hold_active == UNTIL_STOP)
5445                         mddev->hold_active = 0;
5446         }
5447         blk_integrity_unregister(disk);
5448         md_new_event(mddev);
5449         sysfs_notify_dirent_safe(mddev->sysfs_state);
5450         return 0;
5451 }
5452
5453 #ifndef MODULE
5454 static void autorun_array(struct mddev *mddev)
5455 {
5456         struct md_rdev *rdev;
5457         int err;
5458
5459         if (list_empty(&mddev->disks))
5460                 return;
5461
5462         printk(KERN_INFO "md: running: ");
5463
5464         rdev_for_each(rdev, mddev) {
5465                 char b[BDEVNAME_SIZE];
5466                 printk("<%s>", bdevname(rdev->bdev,b));
5467         }
5468         printk("\n");
5469
5470         err = do_md_run(mddev);
5471         if (err) {
5472                 printk(KERN_WARNING "md: do_md_run() returned %d\n", err);
5473                 do_md_stop(mddev, 0, NULL);
5474         }
5475 }
5476
5477 /*
5478  * Let's try to run arrays based on all disks that have arrived
5479  * until now (those are in pending_raid_disks).
5480  *
5481  * The method: pick the first pending disk, collect all disks with
5482  * the same UUID, remove all from the pending list and put them into
5483  * the 'same_array' list. Then order this list based on superblock
5484  * update time (freshest comes first), kick out 'old' disks and
5485  * compare superblocks. If everything's fine then run it.
5486  *
5487  * If "unit" is allocated, then bump its reference count.
5488  */
5489 static void autorun_devices(int part)
5490 {
5491         struct md_rdev *rdev0, *rdev, *tmp;
5492         struct mddev *mddev;
5493         char b[BDEVNAME_SIZE];
5494
5495         printk(KERN_INFO "md: autorun ...\n");
5496         while (!list_empty(&pending_raid_disks)) {
5497                 int unit;
5498                 dev_t dev;
5499                 LIST_HEAD(candidates);
5500                 rdev0 = list_entry(pending_raid_disks.next,
5501                                          struct md_rdev, same_set);
5502
5503                 printk(KERN_INFO "md: considering %s ...\n",
5504                         bdevname(rdev0->bdev,b));
5505                 INIT_LIST_HEAD(&candidates);
5506                 rdev_for_each_list(rdev, tmp, &pending_raid_disks)
5507                         if (super_90_load(rdev, rdev0, 0) >= 0) {
5508                                 printk(KERN_INFO "md:  adding %s ...\n",
5509                                         bdevname(rdev->bdev,b));
5510                                 list_move(&rdev->same_set, &candidates);
5511                         }
5512                 /*
5513                  * now we have a set of devices, with all of them having
5514                  * mostly sane superblocks. It's time to allocate the
5515                  * mddev.
5516                  */
5517                 if (part) {
5518                         dev = MKDEV(mdp_major,
5519                                     rdev0->preferred_minor << MdpMinorShift);
5520                         unit = MINOR(dev) >> MdpMinorShift;
5521                 } else {
5522                         dev = MKDEV(MD_MAJOR, rdev0->preferred_minor);
5523                         unit = MINOR(dev);
5524                 }
5525                 if (rdev0->preferred_minor != unit) {
5526                         printk(KERN_INFO "md: unit number in %s is bad: %d\n",
5527                                bdevname(rdev0->bdev, b), rdev0->preferred_minor);
5528                         break;
5529                 }
5530
5531                 md_probe(dev, NULL, NULL);
5532                 mddev = mddev_find(dev);
5533                 if (!mddev || !mddev->gendisk) {
5534                         if (mddev)
5535                                 mddev_put(mddev);
5536                         printk(KERN_ERR
5537                                 "md: cannot allocate memory for md drive.\n");
5538                         break;
5539                 }
5540                 if (mddev_lock(mddev))
5541                         printk(KERN_WARNING "md: %s locked, cannot run\n",
5542                                mdname(mddev));
5543                 else if (mddev->raid_disks || mddev->major_version
5544                          || !list_empty(&mddev->disks)) {
5545                         printk(KERN_WARNING
5546                                 "md: %s already running, cannot run %s\n",
5547                                 mdname(mddev), bdevname(rdev0->bdev,b));
5548                         mddev_unlock(mddev);
5549                 } else {
5550                         printk(KERN_INFO "md: created %s\n", mdname(mddev));
5551                         mddev->persistent = 1;
5552                         rdev_for_each_list(rdev, tmp, &candidates) {
5553                                 list_del_init(&rdev->same_set);
5554                                 if (bind_rdev_to_array(rdev, mddev))
5555                                         export_rdev(rdev);
5556                         }
5557                         autorun_array(mddev);
5558                         mddev_unlock(mddev);
5559                 }
5560                 /* on success, 'candidates' will be empty; on error
5561                  * it won't be...
5562                  */
5563                 rdev_for_each_list(rdev, tmp, &candidates) {
5564                         list_del_init(&rdev->same_set);
5565                         export_rdev(rdev);
5566                 }
5567                 mddev_put(mddev);
5568         }
5569         printk(KERN_INFO "md: ... autorun DONE.\n");
5570 }
5571 #endif /* !MODULE */
5572
5573 static int get_version(void __user * arg)
5574 {
5575         mdu_version_t ver;
5576
5577         ver.major = MD_MAJOR_VERSION;
5578         ver.minor = MD_MINOR_VERSION;
5579         ver.patchlevel = MD_PATCHLEVEL_VERSION;
5580
5581         if (copy_to_user(arg, &ver, sizeof(ver)))
5582                 return -EFAULT;
5583
5584         return 0;
5585 }
5586
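     /* GET_ARRAY_INFO ioctl: summarise the array into an
      * mdu_array_info_t and copy it to user space.
      */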
5587 static int get_array_info(struct mddev * mddev, void __user * arg)
5588 {
5589         mdu_array_info_t info;
5590         int nr,working,insync,failed,spare;
5591         struct md_rdev *rdev;
5592
5593         nr = working = insync = failed = spare = 0;
5594         rcu_read_lock();
5595         rdev_for_each_rcu(rdev, mddev) {
5596                 nr++;
5597                 if (test_bit(Faulty, &rdev->flags))
5598                         failed++;
5599                 else {
5600                         working++;
5601                         if (test_bit(In_sync, &rdev->flags))
5602                                 insync++;
5603                         else
5604                                 spare++;
5605                 }
5606         }
5607         rcu_read_unlock();
5608
5609         info.major_version = mddev->major_version;
5610         info.minor_version = mddev->minor_version;
5611         info.patch_version = MD_PATCHLEVEL_VERSION;
5612         info.ctime         = mddev->ctime;
5613         info.level         = mddev->level;
5614         info.size          = mddev->dev_sectors / 2;
5615         if (info.size != mddev->dev_sectors / 2) /* overflow */
5616                 info.size = -1;
5617         info.nr_disks      = nr;
5618         info.raid_disks    = mddev->raid_disks;
5619         info.md_minor      = mddev->md_minor;
5620         info.not_persistent= !mddev->persistent;
5621
5622         info.utime         = mddev->utime;
5623         info.state         = 0;
5624         if (mddev->in_sync)
5625                 info.state = (1<<MD_SB_CLEAN);
5626         if (mddev->bitmap && mddev->bitmap_info.offset)
5627                 info.state = (1<<MD_SB_BITMAP_PRESENT);
5628         info.active_disks  = insync;
5629         info.working_disks = working;
5630         info.failed_disks  = failed;
5631         info.spare_disks   = spare;
5632
5633         info.layout        = mddev->layout;
5634         info.chunk_size    = mddev->chunk_sectors << 9;
5635
5636         if (copy_to_user(arg, &info, sizeof(info)))
5637                 return -EFAULT;
5638
5639         return 0;
5640 }
5641
5642 static int get_bitmap_file(struct mddev * mddev, void __user * arg)
5643 {
5644         mdu_bitmap_file_t *file = NULL; /* too big for stack allocation */
5645         char *ptr, *buf = NULL;
5646         int err = -ENOMEM;
5647
5648         file = kmalloc(sizeof(*file), GFP_NOIO);
5649
5650         if (!file)
5651                 goto out;
5652
5653         /* bitmap disabled, zero the first byte and copy out */
5654         if (!mddev->bitmap || !mddev->bitmap->storage.file) {
5655                 file->pathname[0] = '\0';
5656                 goto copy_out;
5657         }
5658
5659         buf = kmalloc(sizeof(file->pathname), GFP_KERNEL);
5660         if (!buf)
5661                 goto out;
5662
5663         ptr = d_path(&mddev->bitmap->storage.file->f_path,
5664                      buf, sizeof(file->pathname));
5665         if (IS_ERR(ptr))
5666                 goto out;
5667
5668         strcpy(file->pathname, ptr);
5669
5670 copy_out:
5671         err = 0;
5672         if (copy_to_user(arg, file, sizeof(*file)))
5673                 err = -EFAULT;
5674 out:
5675         kfree(buf);
5676         kfree(file);
5677         return err;
5678 }
5679
5680 static int get_disk_info(struct mddev * mddev, void __user * arg)
5681 {
5682         mdu_disk_info_t info;
5683         struct md_rdev *rdev;
5684
5685         if (copy_from_user(&info, arg, sizeof(info)))
5686                 return -EFAULT;
5687
5688         rcu_read_lock();
5689         rdev = find_rdev_nr_rcu(mddev, info.number);
5690         if (rdev) {
5691                 info.major = MAJOR(rdev->bdev->bd_dev);
5692                 info.minor = MINOR(rdev->bdev->bd_dev);
5693                 info.raid_disk = rdev->raid_disk;
5694                 info.state = 0;
5695                 if (test_bit(Faulty, &rdev->flags))
5696                         info.state |= (1<<MD_DISK_FAULTY);
5697                 else if (test_bit(In_sync, &rdev->flags)) {
5698                         info.state |= (1<<MD_DISK_ACTIVE);
5699                         info.state |= (1<<MD_DISK_SYNC);
5700                 }
5701                 if (test_bit(WriteMostly, &rdev->flags))
5702                         info.state |= (1<<MD_DISK_WRITEMOSTLY);
5703         } else {
5704                 info.major = info.minor = 0;
5705                 info.raid_disk = -1;
5706                 info.state = (1<<MD_DISK_REMOVED);
5707         }
5708         rcu_read_unlock();
5709
5710         if (copy_to_user(arg, &info, sizeof(info)))
5711                 return -EFAULT;
5712
5713         return 0;
5714 }
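/*
 * Usage sketch (illustrative fragment): GET_DISK_INFO is a read-modify
 * call - userspace fills in only info.number, and get_disk_info()
 * above fills in the rest, reporting MD_DISK_REMOVED for empty slots.
 * The MD_DISK_* bits come from <linux/raid/md_p.h>; the scan bound of
 * 32 is arbitrary.
 *
 *	mdu_disk_info_t d;
 *	int i, fd = open("/dev/md0", O_RDONLY);
 *
 *	for (i = 0; i < 32; i++) {
 *		d.number = i;
 *		if (ioctl(fd, GET_DISK_INFO, &d) < 0)
 *			break;
 *		if (d.state & (1 << MD_DISK_REMOVED))
 *			continue;
 *		printf("slot %d: dev %d:%d raid_disk %d state %#x\n",
 *		       i, d.major, d.minor, d.raid_disk, d.state);
 *	}
 */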
5715
5716 static int add_new_disk(struct mddev * mddev, mdu_disk_info_t *info)
5717 {
5718         char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
5719         struct md_rdev *rdev;
5720         dev_t dev = MKDEV(info->major,info->minor);
5721
5722         if (info->major != MAJOR(dev) || info->minor != MINOR(dev))
5723                 return -EOVERFLOW;
5724
5725         if (!mddev->raid_disks) {
5726                 int err;
5727                 /* expecting a device which has a superblock */
5728                 rdev = md_import_device(dev, mddev->major_version, mddev->minor_version);
5729                 if (IS_ERR(rdev)) {
5730                         printk(KERN_WARNING
5731                                 "md: md_import_device returned %ld\n",
5732                                 PTR_ERR(rdev));
5733                         return PTR_ERR(rdev);
5734                 }
5735                 if (!list_empty(&mddev->disks)) {
5736                         struct md_rdev *rdev0
5737                                 = list_entry(mddev->disks.next,
5738                                              struct md_rdev, same_set);
5739                         err = super_types[mddev->major_version]
5740                                 .load_super(rdev, rdev0, mddev->minor_version);
5741                         if (err < 0) {
5742                                 printk(KERN_WARNING
5743                                         "md: %s has different UUID to %s\n",
5744                                         bdevname(rdev->bdev,b),
5745                                         bdevname(rdev0->bdev,b2));
5746                                 export_rdev(rdev);
5747                                 return -EINVAL;
5748                         }
5749                 }
5750                 err = bind_rdev_to_array(rdev, mddev);
5751                 if (err)
5752                         export_rdev(rdev);
5753                 return err;
5754         }
5755
5756         /*
5757          * add_new_disk can be used once the array is assembled
5758          * to add "hot spares".  They must already have a superblock
5759          * written
5760          */
5761         if (mddev->pers) {
5762                 int err;
5763                 if (!mddev->pers->hot_add_disk) {
5764                         printk(KERN_WARNING
5765                                 "%s: personality does not support diskops!\n",
5766                                mdname(mddev));
5767                         return -EINVAL;
5768                 }
5769                 if (mddev->persistent)
5770                         rdev = md_import_device(dev, mddev->major_version,
5771                                                 mddev->minor_version);
5772                 else
5773                         rdev = md_import_device(dev, -1, -1);
5774                 if (IS_ERR(rdev)) {
5775                         printk(KERN_WARNING
5776                                 "md: md_import_device returned %ld\n",
5777                                 PTR_ERR(rdev));
5778                         return PTR_ERR(rdev);
5779                 }
5780                 /* set saved_raid_disk if appropriate */
5781                 if (!mddev->persistent) {
5782                         if (info->state & (1<<MD_DISK_SYNC)  &&
5783                             info->raid_disk < mddev->raid_disks) {
5784                                 rdev->raid_disk = info->raid_disk;
5785                                 set_bit(In_sync, &rdev->flags);
5786                                 clear_bit(Bitmap_sync, &rdev->flags);
5787                         } else
5788                                 rdev->raid_disk = -1;
5789                         rdev->saved_raid_disk = rdev->raid_disk;
5790                 } else
5791                         super_types[mddev->major_version].
5792                                 validate_super(mddev, rdev);
5793                 if ((info->state & (1<<MD_DISK_SYNC)) &&
5794                      rdev->raid_disk != info->raid_disk) {
5795                         /* This was a hot-add request, but the event
5796                          * counts don't match, so reject it.
5797                          */
5798                         export_rdev(rdev);
5799                         return -EINVAL;
5800                 }
5801
5802                 clear_bit(In_sync, &rdev->flags); /* just to be sure */
5803                 if (info->state & (1<<MD_DISK_WRITEMOSTLY))
5804                         set_bit(WriteMostly, &rdev->flags);
5805                 else
5806                         clear_bit(WriteMostly, &rdev->flags);
5807
5808                 rdev->raid_disk = -1;
5809                 err = bind_rdev_to_array(rdev, mddev);
5810                 if (!err && !mddev->pers->hot_remove_disk) {
5811                         /* If there is hot_add_disk but no hot_remove_disk
5812                          * then disks are only ever added for geometry
5813                          * changes, so activate this one immediately.
5814                          */
5815                         super_types[mddev->major_version].
5816                                 validate_super(mddev, rdev);
5817                         err = mddev->pers->hot_add_disk(mddev, rdev);
5818                         if (err)
5819                                 unbind_rdev_from_array(rdev);
5820                 }
5821                 if (err)
5822                         export_rdev(rdev);
5823                 else
5824                         sysfs_notify_dirent_safe(rdev->sysfs_state);
5825
5826                 set_bit(MD_CHANGE_DEVS, &mddev->flags);
5827                 if (mddev->degraded)
5828                         set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
5829                 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
5830                 if (!err)
5831                         md_new_event(mddev);
5832                 md_wakeup_thread(mddev->thread);
5833                 return err;
5834         }
5835
5836         /* otherwise, add_new_disk is only allowed
5837          * for major_version==0 superblocks
5838          */
5839         if (mddev->major_version != 0) {
5840                 printk(KERN_WARNING "%s: ADD_NEW_DISK not supported\n",
5841                        mdname(mddev));
5842                 return -EINVAL;
5843         }
5844
5845         if (!(info->state & (1<<MD_DISK_FAULTY))) {
5846                 int err;
5847                 rdev = md_import_device(dev, -1, 0);
5848                 if (IS_ERR(rdev)) {
5849                         printk(KERN_WARNING
5850                                 "md: error, md_import_device() returned %ld\n",
5851                                 PTR_ERR(rdev));
5852                         return PTR_ERR(rdev);
5853                 }
5854                 rdev->desc_nr = info->number;
5855                 if (info->raid_disk < mddev->raid_disks)
5856                         rdev->raid_disk = info->raid_disk;
5857                 else
5858                         rdev->raid_disk = -1;
5859
5860                 if (rdev->raid_disk < mddev->raid_disks)
5861                         if (info->state & (1<<MD_DISK_SYNC))
5862                                 set_bit(In_sync, &rdev->flags);
5863
5864                 if (info->state & (1<<MD_DISK_WRITEMOSTLY))
5865                         set_bit(WriteMostly, &rdev->flags);
5866
5867                 if (!mddev->persistent) {
5868                         printk(KERN_INFO "md: nonpersistent superblock ...\n");
5869                         rdev->sb_start = i_size_read(rdev->bdev->bd_inode) / 512;
5870                 } else
5871                         rdev->sb_start = calc_dev_sboffset(rdev);
5872                 rdev->sectors = rdev->sb_start;
5873
5874                 err = bind_rdev_to_array(rdev, mddev);
5875                 if (err) {
5876                         export_rdev(rdev);
5877                         return err;
5878                 }
5879         }
5880
5881         return 0;
5882 }
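/*
 * Usage sketch (illustrative fragment): hot-adding a spare to a
 * running array through the mddev->pers branch above.  For a
 * persistent array validate_super() recomputes most fields from the
 * on-disk superblock, so only major/minor really matter here.
 * major()/minor() come from <sys/sysmacros.h>; paths are hypothetical.
 *
 *	struct stat st;
 *	mdu_disk_info_t d = { 0 };
 *	int fd = open("/dev/md0", O_RDONLY);
 *
 *	stat("/dev/sdc1", &st);
 *	d.major = major(st.st_rdev);
 *	d.minor = minor(st.st_rdev);
 *	d.raid_disk = -1;	// spare: no slot requested
 *	if (ioctl(fd, ADD_NEW_DISK, &d) < 0)
 *		perror("ADD_NEW_DISK");
 */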
5883
5884 static int hot_remove_disk(struct mddev * mddev, dev_t dev)
5885 {
5886         char b[BDEVNAME_SIZE];
5887         struct md_rdev *rdev;
5888
5889         rdev = find_rdev(mddev, dev);
5890         if (!rdev)
5891                 return -ENXIO;
5892
5893         clear_bit(Blocked, &rdev->flags);
5894         remove_and_add_spares(mddev, rdev);
5895
5896         if (rdev->raid_disk >= 0)
5897                 goto busy;
5898
5899         kick_rdev_from_array(rdev);
5900         md_update_sb(mddev, 1);
5901         md_new_event(mddev);
5902
5903         return 0;
5904 busy:
5905         printk(KERN_WARNING "md: cannot remove active disk %s from %s ...\n",
5906                 bdevname(rdev->bdev,b), mdname(mddev));
5907         return -EBUSY;
5908 }
5909
5910 static int hot_add_disk(struct mddev * mddev, dev_t dev)
5911 {
5912         char b[BDEVNAME_SIZE];
5913         int err;
5914         struct md_rdev *rdev;
5915
5916         if (!mddev->pers)
5917                 return -ENODEV;
5918
5919         if (mddev->major_version != 0) {
5920                 printk(KERN_WARNING "%s: HOT_ADD may only be used with"
5921                         " version-0 superblocks.\n",
5922                         mdname(mddev));
5923                 return -EINVAL;
5924         }
5925         if (!mddev->pers->hot_add_disk) {
5926                 printk(KERN_WARNING
5927                         "%s: personality does not support diskops!\n",
5928                         mdname(mddev));
5929                 return -EINVAL;
5930         }
5931
5932         rdev = md_import_device(dev, -1, 0);
5933         if (IS_ERR(rdev)) {
5934                 printk(KERN_WARNING
5935                         "md: error, md_import_device() returned %ld\n",
5936                         PTR_ERR(rdev));
5937                 return -EINVAL;
5938         }
5939
5940         if (mddev->persistent)
5941                 rdev->sb_start = calc_dev_sboffset(rdev);
5942         else
5943                 rdev->sb_start = i_size_read(rdev->bdev->bd_inode) / 512;
5944
5945         rdev->sectors = rdev->sb_start;
5946
5947         if (test_bit(Faulty, &rdev->flags)) {
5948                 printk(KERN_WARNING
5949                         "md: can not hot-add faulty %s disk to %s!\n",
5950                         bdevname(rdev->bdev,b), mdname(mddev));
5951                 err = -EINVAL;
5952                 goto abort_export;
5953         }
5954         clear_bit(In_sync, &rdev->flags);
5955         rdev->desc_nr = -1;
5956         rdev->saved_raid_disk = -1;
5957         err = bind_rdev_to_array(rdev, mddev);
5958         if (err)
5959                 goto abort_export;
5960
5961         /*
5962          * The rest should better be atomic, we can have disk failures
5963          * noticed in interrupt contexts ...
5964          */
5965
5966         rdev->raid_disk = -1;
5967
5968         md_update_sb(mddev, 1);
5969
5970         /*
5971          * Kick recovery, maybe this spare has to be added to the
5972          * array immediately.
5973          */
5974         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
5975         md_wakeup_thread(mddev->thread);
5976         md_new_event(mddev);
5977         return 0;
5978
5979 abort_export:
5980         export_rdev(rdev);
5981         return err;
5982 }
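/*
 * Usage sketch (illustrative fragment): the classic fail/remove/add
 * cycle against set_disk_faulty(), hot_remove_disk() and
 * hot_add_disk().  Note the dev_t is passed as the ioctl argument
 * itself (decoded by new_decode_dev()), not through a struct.  Device
 * numbers are invented; makedev() comes from <sys/sysmacros.h>.
 *
 *	unsigned long dev = makedev(8, 33);	// e.g. /dev/sdc1
 *	int fd = open("/dev/md0", O_RDONLY);
 *
 *	ioctl(fd, SET_DISK_FAULTY, dev);	// mark it failed
 *	ioctl(fd, HOT_REMOVE_DISK, dev);	// -EBUSY until the
 *						// recovery thread has run
 *	ioctl(fd, HOT_ADD_DISK, dev);		// v0.90 superblocks only
 */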
5983
5984 static int set_bitmap_file(struct mddev *mddev, int fd)
5985 {
5986         int err;
5987
5988         if (mddev->pers) {
5989                 if (!mddev->pers->quiesce)
5990                         return -EBUSY;
5991                 if (mddev->recovery || mddev->sync_thread)
5992                         return -EBUSY;
5993                 /* we should be able to change the bitmap.. */
5994         }
5995
5996
5997         if (fd >= 0) {
5998                 if (mddev->bitmap)
5999                         return -EEXIST; /* cannot add when bitmap is present */
6000                 mddev->bitmap_info.file = fget(fd);
6001
6002                 if (mddev->bitmap_info.file == NULL) {
6003                         printk(KERN_ERR "%s: error: failed to get bitmap file\n",
6004                                mdname(mddev));
6005                         return -EBADF;
6006                 }
6007
6008                 err = deny_bitmap_write_access(mddev->bitmap_info.file);
6009                 if (err) {
6010                         printk(KERN_ERR "%s: error: bitmap file is already in use\n",
6011                                mdname(mddev));
6012                         fput(mddev->bitmap_info.file);
6013                         mddev->bitmap_info.file = NULL;
6014                         return err;
6015                 }
6016                 mddev->bitmap_info.offset = 0; /* file overrides offset */
6017         } else if (mddev->bitmap == NULL)
6018                 return -ENOENT; /* cannot remove what isn't there */
6019         err = 0;
6020         if (mddev->pers) {
6021                 mddev->pers->quiesce(mddev, 1);
6022                 if (fd >= 0) {
6023                         err = bitmap_create(mddev);
6024                         if (!err)
6025                                 err = bitmap_load(mddev);
6026                 }
6027                 if (fd < 0 || err) {
6028                         bitmap_destroy(mddev);
6029                         fd = -1; /* make sure to put the file */
6030                 }
6031                 mddev->pers->quiesce(mddev, 0);
6032         }
6033         if (fd < 0) {
6034                 if (mddev->bitmap_info.file) {
6035                         restore_bitmap_write_access(mddev->bitmap_info.file);
6036                         fput(mddev->bitmap_info.file);
6037                 }
6038                 mddev->bitmap_info.file = NULL;
6039         }
6040
6041         return err;
6042 }
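/*
 * Usage sketch (illustrative fragment): attaching and detaching a
 * file-backed write-intent bitmap via set_bitmap_file() above.  The
 * fd is passed by value, and fd == -1 means "remove the current
 * bitmap".  The file must already contain a valid bitmap superblock
 * (mdadm writes one before making this call); paths are hypothetical.
 *
 *	int md = open("/dev/md0", O_RDONLY);
 *	int bm = open("/srv/md0-bitmap", O_RDWR);
 *
 *	ioctl(md, SET_BITMAP_FILE, bm);		// attach
 *	// ... later ...
 *	ioctl(md, SET_BITMAP_FILE, -1);		// detach
 */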
6043
6044 /*
6045  * set_array_info is used in two different ways.
6046  * The original usage is when creating a new array.
6047  * In this usage, raid_disks is > 0 and, together with
6048  *  level, size, not_persistent, layout and chunksize, determines the
6049  *  shape of the array.
6050  *  This will always create an array with a type-0.90.0 superblock.
6051  * The newer usage is when assembling an array.
6052  *  In this case raid_disks will be 0, and the major_version field is
6053  *  used to determine which style of superblocks is to be found on the devices.
6054  *  The minor and patch _version numbers are also kept in case the
6055  *  superblock handler wishes to interpret them.
6056  */
6057 static int set_array_info(struct mddev * mddev, mdu_array_info_t *info)
6058 {
6059
6060         if (info->raid_disks == 0) {
6061                 /* just setting version number for superblock loading */
6062                 if (info->major_version < 0 ||
6063                     info->major_version >= ARRAY_SIZE(super_types) ||
6064                     super_types[info->major_version].name == NULL) {
6065                         /* maybe try to auto-load a module? */
6066                         printk(KERN_INFO
6067                                 "md: superblock version %d not known\n",
6068                                 info->major_version);
6069                         return -EINVAL;
6070                 }
6071                 mddev->major_version = info->major_version;
6072                 mddev->minor_version = info->minor_version;
6073                 mddev->patch_version = info->patch_version;
6074                 mddev->persistent = !info->not_persistent;
6075                 /* ensure mddev_put doesn't delete this now that there
6076                  * is some minimal configuration.
6077                  */
6078                 mddev->ctime         = get_seconds();
6079                 return 0;
6080         }
6081         mddev->major_version = MD_MAJOR_VERSION;
6082         mddev->minor_version = MD_MINOR_VERSION;
6083         mddev->patch_version = MD_PATCHLEVEL_VERSION;
6084         mddev->ctime         = get_seconds();
6085
6086         mddev->level         = info->level;
6087         mddev->clevel[0]     = 0;
6088         mddev->dev_sectors   = 2 * (sector_t)info->size;
6089         mddev->raid_disks    = info->raid_disks;
6090         /* don't set md_minor, it is determined by which /dev/md* was
6091          * opened
6092          */
6093         if (info->state & (1<<MD_SB_CLEAN))
6094                 mddev->recovery_cp = MaxSector;
6095         else
6096                 mddev->recovery_cp = 0;
6097         mddev->persistent    = !info->not_persistent;
6098         mddev->external      = 0;
6099
6100         mddev->layout        = info->layout;
6101         mddev->chunk_sectors = info->chunk_size >> 9;
6102
6103         mddev->max_disks     = MD_SB_DISKS;
6104
6105         if (mddev->persistent)
6106                 mddev->flags         = 0;
6107         set_bit(MD_CHANGE_DEVS, &mddev->flags);
6108
6109         mddev->bitmap_info.default_offset = MD_SB_BYTES >> 9;
6110         mddev->bitmap_info.default_space = 64*2 - (MD_SB_BYTES >> 9);
6111         mddev->bitmap_info.offset = 0;
6112
6113         mddev->reshape_position = MaxSector;
6114
6115         /*
6116          * Generate a 128 bit UUID
6117          */
6118         get_random_bytes(mddev->uuid, 16);
6119
6120         mddev->new_level = mddev->level;
6121         mddev->new_chunk_sectors = mddev->chunk_sectors;
6122         mddev->new_layout = mddev->layout;
6123         mddev->delta_disks = 0;
6124         mddev->reshape_backwards = 0;
6125
6126         return 0;
6127 }
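/*
 * Usage sketch (illustrative fragment): the raid_disks > 0 path of
 * set_array_info() above, as used when creating a fresh array with a
 * 0.90 superblock.  One ADD_NEW_DISK per component and a RUN_ARRAY
 * then bring it online; do_md_run() ignores the RUN_ARRAY argument.
 * All values are invented examples.
 *
 *	mdu_array_info_t info = { 0 };
 *	mdu_param_t param = { 0 };
 *	int fd = open("/dev/md0", O_RDONLY);
 *
 *	info.level = 1;			// RAID1
 *	info.raid_disks = 2;
 *	info.size = 1048512;		// per-device size in 1K blocks
 *	ioctl(fd, SET_ARRAY_INFO, &info);
 *	// ... one ADD_NEW_DISK per component device ...
 *	ioctl(fd, RUN_ARRAY, &param);
 */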
6128
6129 void md_set_array_sectors(struct mddev *mddev, sector_t array_sectors)
6130 {
6131         WARN(!mddev_is_locked(mddev), "%s: unlocked mddev!\n", __func__);
6132
6133         if (mddev->external_size)
6134                 return;
6135
6136         mddev->array_sectors = array_sectors;
6137 }
6138 EXPORT_SYMBOL(md_set_array_sectors);
6139
6140 static int update_size(struct mddev *mddev, sector_t num_sectors)
6141 {
6142         struct md_rdev *rdev;
6143         int rv;
6144         int fit = (num_sectors == 0);
6145
6146         if (mddev->pers->resize == NULL)
6147                 return -EINVAL;
6148         /* The "num_sectors" is the number of sectors of each device that
6149          * is used.  This can only make sense for arrays with redundancy.
6150          * linear and raid0 always use whatever space is available. We can only
6151          * consider changing this number if no resync or reconstruction is
6152          * happening, and if the new size is acceptable. It must fit before the
6153          * sb_start or, if that is <data_offset, it must fit before the size
6154          * of each device.  If num_sectors is zero, we find the largest size
6155          * that fits.
6156          */
6157         if (mddev->sync_thread)
6158                 return -EBUSY;
6159
6160         rdev_for_each(rdev, mddev) {
6161                 sector_t avail = rdev->sectors;
6162
6163                 if (fit && (num_sectors == 0 || num_sectors > avail))
6164                         num_sectors = avail;
6165                 if (avail < num_sectors)
6166                         return -ENOSPC;
6167         }
6168         rv = mddev->pers->resize(mddev, num_sectors);
6169         if (!rv)
6170                 revalidate_disk(mddev->gendisk);
6171         return rv;
6172 }
6173
6174 static int update_raid_disks(struct mddev *mddev, int raid_disks)
6175 {
6176         int rv;
6177         struct md_rdev *rdev;
6178         /* change the number of raid disks */
6179         if (mddev->pers->check_reshape == NULL)
6180                 return -EINVAL;
6181         if (raid_disks <= 0 ||
6182             (mddev->max_disks && raid_disks >= mddev->max_disks))
6183                 return -EINVAL;
6184         if (mddev->sync_thread || mddev->reshape_position != MaxSector)
6185                 return -EBUSY;
6186
6187         rdev_for_each(rdev, mddev) {
6188                 if (mddev->raid_disks < raid_disks &&
6189                     rdev->data_offset < rdev->new_data_offset)
6190                         return -EINVAL;
6191                 if (mddev->raid_disks > raid_disks &&
6192                     rdev->data_offset > rdev->new_data_offset)
6193                         return -EINVAL;
6194         }
6195
6196         mddev->delta_disks = raid_disks - mddev->raid_disks;
6197         if (mddev->delta_disks < 0)
6198                 mddev->reshape_backwards = 1;
6199         else if (mddev->delta_disks > 0)
6200                 mddev->reshape_backwards = 0;
6201
6202         rv = mddev->pers->check_reshape(mddev);
6203         if (rv < 0) {
6204                 mddev->delta_disks = 0;
6205                 mddev->reshape_backwards = 0;
6206         }
6207         return rv;
6208 }
6209
6210
6211 /*
6212  * update_array_info is used to change the configuration of an
6213  * on-line array.
6214  * The version, ctime, level, size, raid_disks, not_persistent, layout and chunk_size
6215  * fields in the info are checked against the array.
6216  * Any differences that cannot be handled will cause an error.
6217  * Normally, only one change can be managed at a time.
6218  */
6219 static int update_array_info(struct mddev *mddev, mdu_array_info_t *info)
6220 {
6221         int rv = 0;
6222         int cnt = 0;
6223         int state = 0;
6224
6225         /* calculate expected state, ignoring low bits */
6226         if (mddev->bitmap && mddev->bitmap_info.offset)
6227                 state |= (1 << MD_SB_BITMAP_PRESENT);
6228
6229         if (mddev->major_version != info->major_version ||
6230             mddev->minor_version != info->minor_version ||
6231 /*          mddev->patch_version != info->patch_version || */
6232             mddev->ctime         != info->ctime         ||
6233             mddev->level         != info->level         ||
6234 /*          mddev->layout        != info->layout        || */
6235             !mddev->persistent   != info->not_persistent||
6236             mddev->chunk_sectors != info->chunk_size >> 9 ||
6237             /* ignore bottom 8 bits of state, and allow SB_BITMAP_PRESENT to change */
6238             ((state^info->state) & 0xfffffe00)
6239                 )
6240                 return -EINVAL;
6241         /* Check there is only one change */
6242         if (info->size >= 0 && mddev->dev_sectors / 2 != info->size)
6243                 cnt++;
6244         if (mddev->raid_disks != info->raid_disks)
6245                 cnt++;
6246         if (mddev->layout != info->layout)
6247                 cnt++;
6248         if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT))
6249                 cnt++;
6250         if (cnt == 0)
6251                 return 0;
6252         if (cnt > 1)
6253                 return -EINVAL;
6254
6255         if (mddev->layout != info->layout) {
6256                 /* Change layout
6257                  * we don't need to do anything at the md level, the
6258                  * personality will take care of it all.
6259                  */
6260                 if (mddev->pers->check_reshape == NULL)
6261                         return -EINVAL;
6262                 else {
6263                         mddev->new_layout = info->layout;
6264                         rv = mddev->pers->check_reshape(mddev);
6265                         if (rv)
6266                                 mddev->new_layout = mddev->layout;
6267                         return rv;
6268                 }
6269         }
6270         if (info->size >= 0 && mddev->dev_sectors / 2 != info->size)
6271                 rv = update_size(mddev, (sector_t)info->size * 2);
6272
6273         if (mddev->raid_disks    != info->raid_disks)
6274                 rv = update_raid_disks(mddev, info->raid_disks);
6275
6276         if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT)) {
6277                 if (mddev->pers->quiesce == NULL)
6278                         return -EINVAL;
6279                 if (mddev->recovery || mddev->sync_thread)
6280                         return -EBUSY;
6281                 if (info->state & (1<<MD_SB_BITMAP_PRESENT)) {
6282                         /* add the bitmap */
6283                         if (mddev->bitmap)
6284                                 return -EEXIST;
6285                         if (mddev->bitmap_info.default_offset == 0)
6286                                 return -EINVAL;
6287                         mddev->bitmap_info.offset =
6288                                 mddev->bitmap_info.default_offset;
6289                         mddev->bitmap_info.space =
6290                                 mddev->bitmap_info.default_space;
6291                         mddev->pers->quiesce(mddev, 1);
6292                         rv = bitmap_create(mddev);
6293                         if (!rv)
6294                                 rv = bitmap_load(mddev);
6295                         if (rv)
6296                                 bitmap_destroy(mddev);
6297                         mddev->pers->quiesce(mddev, 0);
6298                 } else {
6299                         /* remove the bitmap */
6300                         if (!mddev->bitmap)
6301                                 return -ENOENT;
6302                         if (mddev->bitmap->storage.file)
6303                                 return -EINVAL;
6304                         mddev->pers->quiesce(mddev, 1);
6305                         bitmap_destroy(mddev);
6306                         mddev->pers->quiesce(mddev, 0);
6307                         mddev->bitmap_info.offset = 0;
6308                 }
6309         }
6310         md_update_sb(mddev, 1);
6311         return rv;
6312 }
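/*
 * Usage sketch (illustrative fragment): update_array_info() accepts
 * only one change per call, so the usual pattern is to read the
 * current info back and flip a single field.  Adding an internal
 * bitmap to a live persistent array, for example:
 *
 *	mdu_array_info_t info;
 *	int fd = open("/dev/md0", O_RDONLY);
 *
 *	ioctl(fd, GET_ARRAY_INFO, &info);
 *	info.state |= (1 << MD_SB_BITMAP_PRESENT);
 *	ioctl(fd, SET_ARRAY_INFO, &info);	// routed here via md_ioctl()
 */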
6313
6314 static int set_disk_faulty(struct mddev *mddev, dev_t dev)
6315 {
6316         struct md_rdev *rdev;
6317         int err = 0;
6318
6319         if (mddev->pers == NULL)
6320                 return -ENODEV;
6321
6322         rcu_read_lock();
6323         rdev = find_rdev_rcu(mddev, dev);
6324         if (!rdev)
6325                 err =  -ENODEV;
6326         else {
6327                 md_error(mddev, rdev);
6328                 if (!test_bit(Faulty, &rdev->flags))
6329                         err = -EBUSY;
6330         }
6331         rcu_read_unlock();
6332         return err;
6333 }
6334
6335 /*
6336  * We have a problem here : there is no easy way to give a CHS
6337  * virtual geometry. We currently pretend that we have a 2 heads
6338  * 4 sectors (with a BIG number of cylinders...). This drives
6339  * dosfs just mad... ;-)
6340  */
6341 static int md_getgeo(struct block_device *bdev, struct hd_geometry *geo)
6342 {
6343         struct mddev *mddev = bdev->bd_disk->private_data;
6344
6345         geo->heads = 2;
6346         geo->sectors = 4;
6347         geo->cylinders = mddev->array_sectors / 8;
6348         return 0;
6349 }
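/*
 * Worked example for the fake geometry above: with 2 heads and 4
 * sectors per track a cylinder covers 8 sectors, so a 1 TiB array
 * (2147483648 sectors) reports 268435456 cylinders - harmlessly
 * large, since nothing modern navigates by CHS.
 */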
6350
6351 static inline bool md_ioctl_valid(unsigned int cmd)
6352 {
6353         switch (cmd) {
6354         case ADD_NEW_DISK:
6355         case BLKROSET:
6356         case GET_ARRAY_INFO:
6357         case GET_BITMAP_FILE:
6358         case GET_DISK_INFO:
6359         case HOT_ADD_DISK:
6360         case HOT_REMOVE_DISK:
6361         case PRINT_RAID_DEBUG:
6362         case RAID_AUTORUN:
6363         case RAID_VERSION:
6364         case RESTART_ARRAY_RW:
6365         case RUN_ARRAY:
6366         case SET_ARRAY_INFO:
6367         case SET_BITMAP_FILE:
6368         case SET_DISK_FAULTY:
6369         case STOP_ARRAY:
6370         case STOP_ARRAY_RO:
6371                 return true;
6372         default:
6373                 return false;
6374         }
6375 }
6376
6377 static int md_ioctl(struct block_device *bdev, fmode_t mode,
6378                         unsigned int cmd, unsigned long arg)
6379 {
6380         int err = 0;
6381         void __user *argp = (void __user *)arg;
6382         struct mddev *mddev = NULL;
6383         int ro;
6384
6385         if (!md_ioctl_valid(cmd))
6386                 return -ENOTTY;
6387
6388         switch (cmd) {
6389         case RAID_VERSION:
6390         case GET_ARRAY_INFO:
6391         case GET_DISK_INFO:
6392                 break;
6393         default:
6394                 if (!capable(CAP_SYS_ADMIN))
6395                         return -EACCES;
6396         }
6397
6398         /*
6399          * Commands dealing with the RAID driver but not any
6400          * particular array:
6401          */
6402         switch (cmd) {
6403         case RAID_VERSION:
6404                 err = get_version(argp);
6405                 goto done;
6406
6407         case PRINT_RAID_DEBUG:
6408                 err = 0;
6409                 md_print_devices();
6410                 goto done;
6411
6412 #ifndef MODULE
6413         case RAID_AUTORUN:
6414                 err = 0;
6415                 autostart_arrays(arg);
6416                 goto done;
6417 #endif
6418         default:;
6419         }
6420
6421         /*
6422          * Commands creating/starting a new array:
6423          */
6424
6425         mddev = bdev->bd_disk->private_data;
6426
6427         if (!mddev) {
6428                 BUG();
6429                 goto abort;
6430         }
6431
6432         /* Some actions do not require the mutex */
6433         switch (cmd) {
6434         case GET_ARRAY_INFO:
6435                 if (!mddev->raid_disks && !mddev->external)
6436                         err = -ENODEV;
6437                 else
6438                         err = get_array_info(mddev, argp);
6439                 goto abort;
6440
6441         case GET_DISK_INFO:
6442                 if (!mddev->raid_disks && !mddev->external)
6443                         err = -ENODEV;
6444                 else
6445                         err = get_disk_info(mddev, argp);
6446                 goto abort;
6447
6448         case SET_DISK_FAULTY:
6449                 err = set_disk_faulty(mddev, new_decode_dev(arg));
6450                 goto abort;
6451         }
6452
6453         if (cmd == ADD_NEW_DISK)
6454                 /* need to ensure md_delayed_delete() has completed */
6455                 flush_workqueue(md_misc_wq);
6456
6457         if (cmd == HOT_REMOVE_DISK)
6458                 /* need to ensure recovery thread has run */
6459                 wait_event_interruptible_timeout(mddev->sb_wait,
6460                                                  !test_bit(MD_RECOVERY_NEEDED,
6461                                                            &mddev->recovery),
6462                                                  msecs_to_jiffies(5000));
6463         if (cmd == STOP_ARRAY || cmd == STOP_ARRAY_RO) {
6464                 /* Need to flush page cache, and ensure no-one else opens
6465                  * and writes
6466                  */
6467                 mutex_lock(&mddev->open_mutex);
6468                 if (atomic_read(&mddev->openers) > 1) {
6469                         mutex_unlock(&mddev->open_mutex);
6470                         err = -EBUSY;
6471                         goto abort;
6472                 }
6473                 set_bit(MD_STILL_CLOSED, &mddev->flags);
6474                 mutex_unlock(&mddev->open_mutex);
6475                 sync_blockdev(bdev);
6476         }
6477         err = mddev_lock(mddev);
6478         if (err) {
6479                 printk(KERN_INFO
6480                         "md: ioctl lock interrupted, reason %d, cmd %d\n",
6481                         err, cmd);
6482                 goto abort;
6483         }
6484
6485         if (cmd == SET_ARRAY_INFO) {
6486                 mdu_array_info_t info;
6487                 if (!arg)
6488                         memset(&info, 0, sizeof(info));
6489                 else if (copy_from_user(&info, argp, sizeof(info))) {
6490                         err = -EFAULT;
6491                         goto abort_unlock;
6492                 }
6493                 if (mddev->pers) {
6494                         err = update_array_info(mddev, &info);
6495                         if (err) {
6496                                 printk(KERN_WARNING "md: couldn't update"
6497                                        " array info. %d\n", err);
6498                                 goto abort_unlock;
6499                         }
6500                         goto done_unlock;
6501                 }
6502                 if (!list_empty(&mddev->disks)) {
6503                         printk(KERN_WARNING
6504                                "md: array %s already has disks!\n",
6505                                mdname(mddev));
6506                         err = -EBUSY;
6507                         goto abort_unlock;
6508                 }
6509                 if (mddev->raid_disks) {
6510                         printk(KERN_WARNING
6511                                "md: array %s already initialised!\n",
6512                                mdname(mddev));
6513                         err = -EBUSY;
6514                         goto abort_unlock;
6515                 }
6516                 err = set_array_info(mddev, &info);
6517                 if (err) {
6518                         printk(KERN_WARNING "md: couldn't set"
6519                                " array info. %d\n", err);
6520                         goto abort_unlock;
6521                 }
6522                 goto done_unlock;
6523         }
6524
6525         /*
6526          * Commands querying/configuring an existing array:
6527          */
6528         /* if we are not initialised yet, only ADD_NEW_DISK, STOP_ARRAY,
6529          * RUN_ARRAY, and GET_ and SET_BITMAP_FILE are allowed */
6530         if ((!mddev->raid_disks && !mddev->external)
6531             && cmd != ADD_NEW_DISK && cmd != STOP_ARRAY
6532             && cmd != RUN_ARRAY && cmd != SET_BITMAP_FILE
6533             && cmd != GET_BITMAP_FILE) {
6534                 err = -ENODEV;
6535                 goto abort_unlock;
6536         }
6537
6538         /*
6539          * Commands even a read-only array can execute:
6540          */
6541         switch (cmd) {
6542         case GET_BITMAP_FILE:
6543                 err = get_bitmap_file(mddev, argp);
6544                 goto done_unlock;
6545
6546         case RESTART_ARRAY_RW:
6547                 err = restart_array(mddev);
6548                 goto done_unlock;
6549
6550         case STOP_ARRAY:
6551                 err = do_md_stop(mddev, 0, bdev);
6552                 goto done_unlock;
6553
6554         case STOP_ARRAY_RO:
6555                 err = md_set_readonly(mddev, bdev);
6556                 goto done_unlock;
6557
6558         case HOT_REMOVE_DISK:
6559                 err = hot_remove_disk(mddev, new_decode_dev(arg));
6560                 goto done_unlock;
6561
6562         case ADD_NEW_DISK:
6563                 /* We can support ADD_NEW_DISK on read-only arrays
6564                  * only if we are re-adding a preexisting device.
6565                  * So require mddev->pers and MD_DISK_SYNC.
6566                  */
6567                 if (mddev->pers) {
6568                         mdu_disk_info_t info;
6569                         if (copy_from_user(&info, argp, sizeof(info)))
6570                                 err = -EFAULT;
6571                         else if (!(info.state & (1<<MD_DISK_SYNC)))
6572                                 /* Need to clear read-only for this */
6573                                 break;
6574                         else
6575                                 err = add_new_disk(mddev, &info);
6576                         goto done_unlock;
6577                 }
6578                 break;
6579
6580         case BLKROSET:
6581                 if (get_user(ro, (int __user *)(arg))) {
6582                         err = -EFAULT;
6583                         goto done_unlock;
6584                 }
6585                 err = -EINVAL;
6586
6587                 /* if the bdev is going readonly the value of mddev->ro
6588                  * does not matter, no writes are coming
6589                  */
6590                 if (ro)
6591                         goto done_unlock;
6592
6593                 /* are we already prepared for writes? */
6594                 if (mddev->ro != 1)
6595                         goto done_unlock;
6596
6597                 /* the transition to read-auto need only happen for
6598                  * arrays that call md_write_start
6599                  */
6600                 if (mddev->pers) {
6601                         err = restart_array(mddev);
6602                         if (err == 0) {
6603                                 mddev->ro = 2;
6604                                 set_disk_ro(mddev->gendisk, 0);
6605                         }
6606                 }
6607                 goto done_unlock;
6608         }
6609
6610         /*
6611          * The remaining ioctls are changing the state of the
6612          * superblock, so we do not allow them on read-only arrays.
6613          * However non-MD ioctls (e.g. get-size) will still come through
6614          * here and hit the 'default' below, so only disallow
6615          * 'md' ioctls, and switch to rw mode if started auto-readonly.
6616          */
6617         if (_IOC_TYPE(cmd) == MD_MAJOR && mddev->ro && mddev->pers) {
6618                 if (mddev->ro == 2) {
6619                         mddev->ro = 0;
6620                         sysfs_notify_dirent_safe(mddev->sysfs_state);
6621                         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
6622                         /* mddev_unlock will wake thread */
6623                         /* If a device failed while we were read-only, we
6624                          * need to make sure the metadata is updated now.
6625                          */
6626                         if (test_bit(MD_CHANGE_DEVS, &mddev->flags)) {
6627                                 mddev_unlock(mddev);
6628                                 wait_event(mddev->sb_wait,
6629                                            !test_bit(MD_CHANGE_DEVS, &mddev->flags) &&
6630                                            !test_bit(MD_CHANGE_PENDING, &mddev->flags));
6631                                 mddev_lock_nointr(mddev);
6632                         }
6633                 } else {
6634                         err = -EROFS;
6635                         goto abort_unlock;
6636                 }
6637         }
6638
6639         switch (cmd) {
6640         case ADD_NEW_DISK:
6641         {
6642                 mdu_disk_info_t info;
6643                 if (copy_from_user(&info, argp, sizeof(info)))
6644                         err = -EFAULT;
6645                 else
6646                         err = add_new_disk(mddev, &info);
6647                 goto done_unlock;
6648         }
6649
6650         case HOT_ADD_DISK:
6651                 err = hot_add_disk(mddev, new_decode_dev(arg));
6652                 goto done_unlock;
6653
6654         case RUN_ARRAY:
6655                 err = do_md_run(mddev);
6656                 goto done_unlock;
6657
6658         case SET_BITMAP_FILE:
6659                 err = set_bitmap_file(mddev, (int)arg);
6660                 goto done_unlock;
6661
6662         default:
6663                 err = -EINVAL;
6664                 goto abort_unlock;
6665         }
6666
6667 done_unlock:
6668 abort_unlock:
6669         if (mddev->hold_active == UNTIL_IOCTL &&
6670             err != -EINVAL)
6671                 mddev->hold_active = 0;
6672         mddev_unlock(mddev);
6673
6674         return err;
6675 done:
6676         if (err)
6677                 MD_BUG();
6678 abort:
6679         return err;
6680 }
6681 #ifdef CONFIG_COMPAT
6682 static int md_compat_ioctl(struct block_device *bdev, fmode_t mode,
6683                     unsigned int cmd, unsigned long arg)
6684 {
6685         switch (cmd) {
6686         case HOT_REMOVE_DISK:
6687         case HOT_ADD_DISK:
6688         case SET_DISK_FAULTY:
6689         case SET_BITMAP_FILE:
6690                 /* These take an integer arg; do not convert it */
6691                 break;
6692         default:
6693                 arg = (unsigned long)compat_ptr(arg);
6694                 break;
6695         }
6696
6697         return md_ioctl(bdev, mode, cmd, arg);
6698 }
6699 #endif /* CONFIG_COMPAT */
6700
6701 static int md_open(struct block_device *bdev, fmode_t mode)
6702 {
6703         /*
6704          * Succeed if we can lock the mddev, which confirms that
6705          * it isn't being stopped right now.
6706          */
6707         struct mddev *mddev = mddev_find(bdev->bd_dev);
6708         int err;
6709
6710         if (!mddev)
6711                 return -ENODEV;
6712
6713         if (mddev->gendisk != bdev->bd_disk) {
6714                 /* we are racing with mddev_put which is discarding this
6715                  * bd_disk.
6716                  */
6717                 mddev_put(mddev);
6718                 /* Wait until bdev->bd_disk is definitely gone */
6719                 flush_workqueue(md_misc_wq);
6720                 /* Then retry the open from the top */
6721                 return -ERESTARTSYS;
6722         }
6723         BUG_ON(mddev != bdev->bd_disk->private_data);
6724
6725         if ((err = mutex_lock_interruptible(&mddev->open_mutex)))
6726                 goto out;
6727
6728         err = 0;
6729         atomic_inc(&mddev->openers);
6730         clear_bit(MD_STILL_CLOSED, &mddev->flags);
6731         mutex_unlock(&mddev->open_mutex);
6732
6733         check_disk_change(bdev);
6734  out:
6735         return err;
6736 }
6737
6738 static void md_release(struct gendisk *disk, fmode_t mode)
6739 {
6740         struct mddev *mddev = disk->private_data;
6741
6742         BUG_ON(!mddev);
6743         atomic_dec(&mddev->openers);
6744         mddev_put(mddev);
6745 }
6746
6747 static int md_media_changed(struct gendisk *disk)
6748 {
6749         struct mddev *mddev = disk->private_data;
6750
6751         return mddev->changed;
6752 }
6753
6754 static int md_revalidate(struct gendisk *disk)
6755 {
6756         struct mddev *mddev = disk->private_data;
6757
6758         mddev->changed = 0;
6759         return 0;
6760 }
6761 static const struct block_device_operations md_fops =
6762 {
6763         .owner          = THIS_MODULE,
6764         .open           = md_open,
6765         .release        = md_release,
6766         .ioctl          = md_ioctl,
6767 #ifdef CONFIG_COMPAT
6768         .compat_ioctl   = md_compat_ioctl,
6769 #endif
6770         .getgeo         = md_getgeo,
6771         .media_changed  = md_media_changed,
6772         .revalidate_disk= md_revalidate,
6773 };
6774
6775 static int md_thread(void * arg)
6776 {
6777         struct md_thread *thread = arg;
6778
6779         /*
6780          * md_thread is a 'system-thread', its priority should be very
6781          * high. We avoid resource deadlocks individually in each
6782          * raid personality. (RAID5 does preallocation) We also use RR and
6783          * the very same RT priority as kswapd, thus we will never get
6784          * into a priority inversion deadlock.
6785          *
6786          * We definitely have to have equal or higher priority than
6787          * bdflush, otherwise bdflush will deadlock if there are too
6788          * many dirty RAID5 blocks.
6789          */
6790
6791         allow_signal(SIGKILL);
6792         while (!kthread_should_stop()) {
6793
6794                 /* We need to wait INTERRUPTIBLE so that
6795                  * we don't add to the load-average.
6796                  * That means we need to be sure no signals are
6797                  * pending
6798                  */
6799                 if (signal_pending(current))
6800                         flush_signals(current);
6801
6802                 wait_event_interruptible_timeout
6803                         (thread->wqueue,
6804                          test_bit(THREAD_WAKEUP, &thread->flags)
6805                          || kthread_should_stop(),
6806                          thread->timeout);
6807
6808                 clear_bit(THREAD_WAKEUP, &thread->flags);
6809                 if (!kthread_should_stop())
6810                         thread->run(thread);
6811         }
6812
6813         return 0;
6814 }
6815
6816 void md_wakeup_thread(struct md_thread *thread)
6817 {
6818         if (thread) {
6819                 pr_debug("md: waking up MD thread %s.\n", thread->tsk->comm);
6820                 set_bit(THREAD_WAKEUP, &thread->flags);
6821                 wake_up(&thread->wqueue);
6822         }
6823 }
6824
6825 struct md_thread *md_register_thread(void (*run) (struct md_thread *),
6826                 struct mddev *mddev, const char *name)
6827 {
6828         struct md_thread *thread;
6829
6830         thread = kzalloc(sizeof(struct md_thread), GFP_KERNEL);
6831         if (!thread)
6832                 return NULL;
6833
6834         init_waitqueue_head(&thread->wqueue);
6835
6836         thread->run = run;
6837         thread->mddev = mddev;
6838         thread->timeout = MAX_SCHEDULE_TIMEOUT;
6839         thread->tsk = kthread_run(md_thread, thread,
6840                                   "%s_%s",
6841                                   mdname(thread->mddev),
6842                                   name);
6843         if (IS_ERR(thread->tsk)) {
6844                 kfree(thread);
6845                 return NULL;
6846         }
6847         return thread;
6848 }
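/*
 * Usage note: callers hand md_register_thread() a callback, the mddev
 * and a short suffix that becomes part of the kthread name.  For
 * example, md_check_recovery() starts the resync worker elsewhere in
 * this file with:
 *
 *	mddev->sync_thread = md_register_thread(md_do_sync, mddev,
 *						"resync");
 *
 * which yields a task named "md0_resync" for array md0.  The thread
 * then sleeps in md_thread() until md_wakeup_thread() sets
 * THREAD_WAKEUP.
 */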
6849
6850 void md_unregister_thread(struct md_thread **threadp)
6851 {
6852         struct md_thread *thread = *threadp;
6853         if (!thread)
6854                 return;
6855         pr_debug("interrupting MD-thread pid %d\n", task_pid_nr(thread->tsk));
6856         /* Locking ensures that mddev_unlock does not wake_up a
6857          * non-existent thread
6858          */
6859         spin_lock(&pers_lock);
6860         *threadp = NULL;
6861         spin_unlock(&pers_lock);
6862
6863         kthread_stop(thread->tsk);
6864         kfree(thread);
6865 }
6866
6867 void md_error(struct mddev *mddev, struct md_rdev *rdev)
6868 {
6869         if (!mddev) {
6870                 MD_BUG();
6871                 return;
6872         }
6873
6874         if (!rdev || test_bit(Faulty, &rdev->flags))
6875                 return;
6876
6877         if (!mddev->pers || !mddev->pers->error_handler)
6878                 return;
6879         mddev->pers->error_handler(mddev,rdev);
6880         if (mddev->degraded)
6881                 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
6882         sysfs_notify_dirent_safe(rdev->sysfs_state);
6883         set_bit(MD_RECOVERY_INTR, &mddev->recovery);
6884         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
6885         md_wakeup_thread(mddev->thread);
6886         if (mddev->event_work.func)
6887                 queue_work(md_misc_wq, &mddev->event_work);
6888         md_new_event_inintr(mddev);
6889 }
6890
6891 /* seq_file implementation /proc/mdstat */
6892
6893 static void status_unused(struct seq_file *seq)
6894 {
6895         int i = 0;
6896         struct md_rdev *rdev;
6897
6898         seq_printf(seq, "unused devices: ");
6899
6900         list_for_each_entry(rdev, &pending_raid_disks, same_set) {
6901                 char b[BDEVNAME_SIZE];
6902                 i++;
6903                 seq_printf(seq, "%s ",
6904                               bdevname(rdev->bdev,b));
6905         }
6906         if (!i)
6907                 seq_printf(seq, "<none>");
6908
6909         seq_printf(seq, "\n");
6910 }
6911
6912
6913 static void status_resync(struct seq_file *seq, struct mddev * mddev)
6914 {
6915         sector_t max_sectors, resync, res;
6916         unsigned long dt, db;
6917         sector_t rt;
6918         int scale;
6919         unsigned int per_milli;
6920
6921         if (mddev->curr_resync <= 3)
6922                 resync = 0;
6923         else
6924                 resync = mddev->curr_resync
6925                         - atomic_read(&mddev->recovery_active);
6926
6927         if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ||
6928             test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
6929                 max_sectors = mddev->resync_max_sectors;
6930         else
6931                 max_sectors = mddev->dev_sectors;
6932
6933         /*
6934          * Should not happen.
6935          */
6936         if (!max_sectors) {
6937                 MD_BUG();
6938                 return;
6939         }
6940         /* Pick 'scale' such that (resync>>scale)*1000 will fit
6941          * in a sector_t, and (max_sectors>>scale) will fit in a
6942          * u32, as those are the requirements for sector_div.
6943          * Thus 'scale' must be at least 10
6944          */
6945         scale = 10;
6946         if (sizeof(sector_t) > sizeof(unsigned long)) {
6947                 while (max_sectors/2 > (1ULL<<(scale+32)))
6948                         scale++;
6949         }
6950         res = (resync>>scale)*1000;
6951         sector_div(res, (u32)((max_sectors>>scale)+1));
6952
6953         per_milli = res;
6954         {
6955                 int i, x = per_milli/50, y = 20-x;
6956                 seq_printf(seq, "[");
6957                 for (i = 0; i < x; i++)
6958                         seq_printf(seq, "=");
6959                 seq_printf(seq, ">");
6960                 for (i = 0; i < y; i++)
6961                         seq_printf(seq, ".");
6962                 seq_printf(seq, "] ");
6963         }
6964         seq_printf(seq, " %s =%3u.%u%% (%llu/%llu)",
6965                    (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)?
6966                     "reshape" :
6967                     (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)?
6968                      "check" :
6969                      (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ?
6970                       "resync" : "recovery"))),
6971                    per_milli/10, per_milli % 10,
6972                    (unsigned long long) resync/2,
6973                    (unsigned long long) max_sectors/2);
6974
6975         /*
6976          * dt: time from mark until now
6977          * db: blocks written from mark until now
6978          * rt: remaining time
6979          *
6980          * rt is a sector_t, so could be 32bit or 64bit.
6981          * So we divide before multiply in case it is 32bit and close
6982          * to the limit.
6983          * We scale the divisor (db) by 32 to avoid losing precision
6984          * near the end of resync when the number of remaining sectors
6985          * is close to 'db'.
6986          * We then divide rt by 32 after multiplying by db to compensate.
6987          * The '+1' avoids division by zero if db is very small.
6988          */
6989         dt = ((jiffies - mddev->resync_mark) / HZ);
6990         if (!dt) dt++;
6991         db = (mddev->curr_mark_cnt - atomic_read(&mddev->recovery_active))
6992                 - mddev->resync_mark_cnt;
6993
6994         rt = max_sectors - resync;    /* number of remaining sectors */
6995         sector_div(rt, db/32+1);
6996         rt *= dt;
6997         rt >>= 5;
6998
6999         seq_printf(seq, " finish=%lu.%lumin", (unsigned long)rt / 60,
7000                    ((unsigned long)rt % 60)/6);
7001
7002         seq_printf(seq, " speed=%ldK/sec", db/2/dt);
7003 }
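/*
 * Worked example of the arithmetic above (numbers invented): with
 * max_sectors = 2000000, resync = 500000 and scale = 10,
 *
 *	res = ((500000 >> 10) * 1000) / ((2000000 >> 10) + 1)
 *	    = 488000 / 1954 = 249
 *
 * so per_milli = 249 and the line shows "24.9%".  With dt = 100s and
 * db = 10000 sectors since the last mark,
 *
 *	rt = ((1500000 / (10000/32 + 1)) * 100) >> 5 = 14975 seconds
 *
 * close to the exact remaining * dt / db = 15000s; the small error is
 * the cost of the overflow-avoiding /32 scaling.
 */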
7004
7005 static void *md_seq_start(struct seq_file *seq, loff_t *pos)
7006 {
7007         struct list_head *tmp;
7008         loff_t l = *pos;
7009         struct mddev *mddev;
7010
7011         if (l >= 0x10000)
7012                 return NULL;
7013         if (!l--)
7014                 /* header */
7015                 return (void*)1;
7016
7017         spin_lock(&all_mddevs_lock);
7018         list_for_each(tmp,&all_mddevs)
7019                 if (!l--) {
7020                         mddev = list_entry(tmp, struct mddev, all_mddevs);
7021                         mddev_get(mddev);
7022                         spin_unlock(&all_mddevs_lock);
7023                         return mddev;
7024                 }
7025         spin_unlock(&all_mddevs_lock);
7026         if (!l--)
7027                 return (void*)2;/* tail */
7028         return NULL;
7029 }
7030
7031 static void *md_seq_next(struct seq_file *seq, void *v, loff_t *pos)
7032 {
7033         struct list_head *tmp;
7034         struct mddev *next_mddev, *mddev = v;
7035
7036         ++*pos;
7037         if (v == (void*)2)
7038                 return NULL;
7039
7040         spin_lock(&all_mddevs_lock);
7041         if (v == (void*)1)
7042                 tmp = all_mddevs.next;
7043         else
7044                 tmp = mddev->all_mddevs.next;
7045         if (tmp != &all_mddevs)
7046                 next_mddev = mddev_get(list_entry(tmp,struct mddev,all_mddevs));
7047         else {
7048                 next_mddev = (void*)2;
7049                 *pos = 0x10000;
7050         }
7051         spin_unlock(&all_mddevs_lock);
7052
7053         if (v != (void*)1)
7054                 mddev_put(mddev);
7055         return next_mddev;
7056
7057 }
7058
7059 static void md_seq_stop(struct seq_file *seq, void *v)
7060 {
7061         struct mddev *mddev = v;
7062
7063         if (mddev && v != (void*)1 && v != (void*)2)
7064                 mddev_put(mddev);
7065 }
7066
7067 static int md_seq_show(struct seq_file *seq, void *v)
7068 {
7069         struct mddev *mddev = v;
7070         sector_t sectors;
7071         struct md_rdev *rdev;
7072
7073         if (v == (void*)1) {
7074                 struct md_personality *pers;
7075                 seq_printf(seq, "Personalities : ");
7076                 spin_lock(&pers_lock);
7077                 list_for_each_entry(pers, &pers_list, list)
7078                         seq_printf(seq, "[%s] ", pers->name);
7079
7080                 spin_unlock(&pers_lock);
7081                 seq_printf(seq, "\n");
7082                 seq->poll_event = atomic_read(&md_event_count);
7083                 return 0;
7084         }
7085         if (v == (void*)2) {
7086                 status_unused(seq);
7087                 return 0;
7088         }
7089
7090         if (mddev_lock(mddev) < 0)
7091                 return -EINTR;
7092
7093         if (mddev->pers || mddev->raid_disks || !list_empty(&mddev->disks)) {
7094                 seq_printf(seq, "%s : %sactive", mdname(mddev),
7095                                                 mddev->pers ? "" : "in");
7096                 if (mddev->pers) {
7097                         if (mddev->ro==1)
7098                                 seq_printf(seq, " (read-only)");
7099                         if (mddev->ro==2)
7100                                 seq_printf(seq, " (auto-read-only)");
7101                         seq_printf(seq, " %s", mddev->pers->name);
7102                 }
7103
7104                 sectors = 0;
7105                 rdev_for_each(rdev, mddev) {
7106                         char b[BDEVNAME_SIZE];
7107                         seq_printf(seq, " %s[%d]",
7108                                 bdevname(rdev->bdev,b), rdev->desc_nr);
7109                         if (test_bit(WriteMostly, &rdev->flags))
7110                                 seq_printf(seq, "(W)");
7111                         if (test_bit(Faulty, &rdev->flags)) {
7112                                 seq_printf(seq, "(F)");
7113                                 continue;
7114                         }
7115                         if (rdev->raid_disk < 0)
7116                                 seq_printf(seq, "(S)"); /* spare */
7117                         if (test_bit(Replacement, &rdev->flags))
7118                                 seq_printf(seq, "(R)");
7119                         sectors += rdev->sectors;
7120                 }
7121
7122                 if (!list_empty(&mddev->disks)) {
7123                         if (mddev->pers)
7124                                 seq_printf(seq, "\n      %llu blocks",
7125                                            (unsigned long long)
7126                                            mddev->array_sectors / 2);
7127                         else
7128                                 seq_printf(seq, "\n      %llu blocks",
7129                                            (unsigned long long)sectors / 2);
7130                 }
7131                 if (mddev->persistent) {
7132                         if (mddev->major_version != 0 ||
7133                             mddev->minor_version != 90) {
7134                                 seq_printf(seq," super %d.%d",
7135                                            mddev->major_version,
7136                                            mddev->minor_version);
7137                         }
7138                 } else if (mddev->external)
7139                         seq_printf(seq, " super external:%s",
7140                                    mddev->metadata_type);
7141                 else
7142                         seq_printf(seq, " super non-persistent");
7143
7144                 if (mddev->pers) {
7145                         mddev->pers->status(seq, mddev);
7146                         seq_printf(seq, "\n      ");
7147                         if (mddev->pers->sync_request) {
7148                                 if (mddev->curr_resync > 2) {
7149                                         status_resync(seq, mddev);
7150                                         seq_printf(seq, "\n      ");
7151                                 } else if (mddev->curr_resync >= 1)
7152                                         seq_printf(seq, "\tresync=DELAYED\n      ");
7153                                 else if (mddev->recovery_cp < MaxSector)
7154                                         seq_printf(seq, "\tresync=PENDING\n      ");
7155                         }
7156                 } else
7157                         seq_printf(seq, "\n       ");
7158
7159                 bitmap_status(seq, mddev->bitmap);
7160
7161                 seq_printf(seq, "\n");
7162         }
7163         mddev_unlock(mddev);
7164         
7165         return 0;
7166 }
7167
7168 static const struct seq_operations md_seq_ops = {
7169         .start  = md_seq_start,
7170         .next   = md_seq_next,
7171         .stop   = md_seq_stop,
7172         .show   = md_seq_show,
7173 };
7174
7175 static int md_seq_open(struct inode *inode, struct file *file)
7176 {
7177         struct seq_file *seq;
7178         int error;
7179
7180         error = seq_open(file, &md_seq_ops);
7181         if (error)
7182                 return error;
7183
7184         seq = file->private_data;
7185         seq->poll_event = atomic_read(&md_event_count);
7186         return error;
7187 }
7188
7189 static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
7190 {
7191         struct seq_file *seq = filp->private_data;
7192         int mask;
7193
7194         poll_wait(filp, &md_event_waiters, wait);
7195
7196         /* always allow read */
7197         mask = POLLIN | POLLRDNORM;
7198
7199         if (seq->poll_event != atomic_read(&md_event_count))
7200                 mask |= POLLERR | POLLPRI;
7201         return mask;
7202 }
7203
7204 static const struct file_operations md_seq_fops = {
7205         .owner          = THIS_MODULE,
7206         .open           = md_seq_open,
7207         .read           = seq_read,
7208         .llseek         = seq_lseek,
7209         .release        = seq_release, /* md_seq_open() used plain seq_open() */
7210         .poll           = mdstat_poll,
7211 };
7212
7213 int register_md_personality(struct md_personality *p)
7214 {
7215         spin_lock(&pers_lock);
7216         list_add_tail(&p->list, &pers_list);
7217         printk(KERN_INFO "md: %s personality registered for level %d\n", p->name, p->level);
7218         spin_unlock(&pers_lock);
7219         return 0;
7220 }
7221
7222 int unregister_md_personality(struct md_personality *p)
7223 {
7224         printk(KERN_INFO "md: %s personality unregistered\n", p->name);
7225         spin_lock(&pers_lock);
7226         list_del_init(&p->list);
7227         spin_unlock(&pers_lock);
7228         return 0;
7229 }
7230
7231 static int is_mddev_idle(struct mddev *mddev, int init)
7232 {
7233         struct md_rdev * rdev;
7234         int idle;
7235         int curr_events;
7236
7237         idle = 1;
7238         rcu_read_lock();
7239         rdev_for_each_rcu(rdev, mddev) {
7240                 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
7241                 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
7242                               (int)part_stat_read(&disk->part0, sectors[1]) -
7243                               atomic_read(&disk->sync_io);
7244                 /* sync IO will cause sync_io to increase before the disk_stats
7245                  * as sync_io is counted when a request starts, and
7246                  * disk_stats is counted when it completes.
7247                  * So resync activity will cause curr_events to be smaller than
7248                  * when there was no such activity.
7249                  * non-sync IO will cause disk_stats to increase without
7250                  * increasing sync_io so curr_events will (eventually)
7251                  * be larger than it was before.  Once it becomes
7252                  * substantially larger, the test below will cause
7253                  * the array to appear non-idle, and resync will slow
7254                  * down.
7255                  * If there is a lot of outstanding resync activity when
7256                  * we set last_event to curr_events, then all that activity
7257                  * completing might cause the array to appear non-idle
7258                  * and resync will be slowed down even though there might
7259                  * not have been non-resync activity.  This will only
7260                  * happen once though.  'last_events' will soon reflect
7261                  * the state where there is little or no outstanding
7262                  * resync requests, and further resync activity will
7263                  * always make curr_events less than last_events.
7264                  *
7265                  */
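                /* Example: if 128 sectors of ordinary (non-sync) IO
                 * complete on this disk, the part0 sector counts grow
                 * by 128 while sync_io is unchanged, so curr_events
                 * moves more than 64 ahead of last_events and the
                 * array is treated as non-idle below.
                 */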
7266                 if (init || curr_events - rdev->last_events > 64) {
7267                         rdev->last_events = curr_events;
7268                         idle = 0;
7269                 }
7270         }
7271         rcu_read_unlock();
7272         return idle;
7273 }
7274
7275 void md_done_sync(struct mddev *mddev, int blocks, int ok)
7276 {
7277         /* another "blocks" (512-byte) blocks have been synced */
7278         atomic_sub(blocks, &mddev->recovery_active);
7279         wake_up(&mddev->recovery_wait);
7280         if (!ok) {
7281                 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
7282                 set_bit(MD_RECOVERY_ERROR, &mddev->recovery);
7283                 md_wakeup_thread(mddev->thread);
7284                 /* stop recovery, signal do_sync ... */
7285         }
7286 }
7287
7288
7289 /* md_write_start(mddev, bi)
7290  * If we need to update some array metadata (e.g. 'active' flag
7291  * in superblock) before writing, schedule a superblock update
7292  * and wait for it to complete.
7293  */
7294 void md_write_start(struct mddev *mddev, struct bio *bi)
7295 {
7296         int did_change = 0;
7297         if (bio_data_dir(bi) != WRITE)
7298                 return;
7299
7300         BUG_ON(mddev->ro == 1);
7301         if (mddev->ro == 2) {
7302                 /* need to switch to read/write */
7303                 mddev->ro = 0;
7304                 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
7305                 md_wakeup_thread(mddev->thread);
7306                 md_wakeup_thread(mddev->sync_thread);
7307                 did_change = 1;
7308         }
7309         atomic_inc(&mddev->writes_pending);
7310         if (mddev->safemode == 1)
7311                 mddev->safemode = 0;
7312         if (mddev->in_sync) {
7313                 spin_lock_irq(&mddev->write_lock);
7314                 if (mddev->in_sync) {
7315                         mddev->in_sync = 0;
7316                         set_bit(MD_CHANGE_CLEAN, &mddev->flags);
7317                         set_bit(MD_CHANGE_PENDING, &mddev->flags);
7318                         md_wakeup_thread(mddev->thread);
7319                         did_change = 1;
7320                 }
7321                 spin_unlock_irq(&mddev->write_lock);
7322         }
7323         if (did_change)
7324                 sysfs_notify_dirent_safe(mddev->sysfs_state);
7325         wait_event(mddev->sb_wait,
7326                    !test_bit(MD_CHANGE_PENDING, &mddev->flags));
7327 }
7328
7329 void md_write_end(struct mddev *mddev)
7330 {
7331         if (atomic_dec_and_test(&mddev->writes_pending)) {
7332                 if (mddev->safemode == 2)
7333                         md_wakeup_thread(mddev->thread);
7334                 else if (mddev->safemode_delay)
7335                         mod_timer(&mddev->safemode_timer, jiffies + mddev->safemode_delay);
7336         }
7337 }
7338
7339 /* md_allow_write(mddev)
7340  * Calling this ensures that the array is marked 'active' so that writes
7341  * may proceed without blocking.  It is important to call this before
7342  * attempting a GFP_KERNEL allocation while holding the mddev lock.
7343  * Must be called with mddev_lock held.
7344  *
7345  * In the ->external case MD_CHANGE_CLEAN can not be cleared until mddev->lock
7346  * is dropped, so return -EAGAIN after notifying userspace.
7347  */
7348 int md_allow_write(struct mddev *mddev)
7349 {
7350         if (!mddev->pers)
7351                 return 0;
7352         if (mddev->ro)
7353                 return 0;
7354         if (!mddev->pers->sync_request)
7355                 return 0;
7356
7357         spin_lock_irq(&mddev->write_lock);
7358         if (mddev->in_sync) {
7359                 mddev->in_sync = 0;
7360                 set_bit(MD_CHANGE_CLEAN, &mddev->flags);
7361                 set_bit(MD_CHANGE_PENDING, &mddev->flags);
7362                 if (mddev->safemode_delay &&
7363                     mddev->safemode == 0)
7364                         mddev->safemode = 1;
7365                 spin_unlock_irq(&mddev->write_lock);
7366                 md_update_sb(mddev, 0);
7367                 sysfs_notify_dirent_safe(mddev->sysfs_state);
7368         } else
7369                 spin_unlock_irq(&mddev->write_lock);
7370
7371         if (test_bit(MD_CHANGE_PENDING, &mddev->flags))
7372                 return -EAGAIN;
7373         else
7374                 return 0;
7375 }
7376 EXPORT_SYMBOL_GPL(md_allow_write);
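/*
 * A minimal sketch of the intended calling pattern (illustrative only;
 * the allocation and error handling below are made up, not taken from a
 * real caller):
 *
 *	err = md_allow_write(mddev);		 /~ mddev_lock already held ~/
 *	if (err)				 /~ -EAGAIN: ->external case ~/
 *		return err;
 *	new = kzalloc(sizeof(*new), GFP_KERNEL); /~ now safe to block ~/
 *
 * The point is that a GFP_KERNEL allocation may wait on writeback to
 * this same array; marking the array active up front keeps such writes
 * from blocking on a superblock update that would need the mddev lock
 * we hold.
 */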
7377
7378 #define SYNC_MARKS      10
7379 #define SYNC_MARK_STEP  (3*HZ)
7380 #define UPDATE_FREQUENCY (5*60*HZ)
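/*
 * Speed accounting: every SYNC_MARK_STEP jiffies (3 seconds) md_do_sync
 * rotates the mark[]/mark_cnt[] rings, so resync_mark/resync_mark_cnt
 * always describe a point up to SYNC_MARKS * SYNC_MARK_STEP (~30 seconds)
 * in the past, and currspeed below is the average K/sec over that recent
 * window rather than over the whole resync.
 */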
7381 void md_do_sync(struct md_thread *thread)
7382 {
7383         struct mddev *mddev = thread->mddev;
7384         struct mddev *mddev2;
7385         unsigned int currspeed = 0,
7386                  window;
7387         sector_t max_sectors,j, io_sectors;
7388         unsigned long mark[SYNC_MARKS];
7389         unsigned long update_time;
7390         sector_t mark_cnt[SYNC_MARKS];
7391         int last_mark,m;
7392         struct list_head *tmp;
7393         sector_t last_check;
7394         int skipped = 0;
7395         struct md_rdev *rdev;
7396         char *desc, *action = NULL;
7397         struct blk_plug plug;
7398
7399         /* just in case the thread restarts... */
7400         if (test_bit(MD_RECOVERY_DONE, &mddev->recovery))
7401                 return;
7402         if (mddev->ro) {/* never try to sync a read-only array */
7403                 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
7404                 return;
7405         }
7406
7407         if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
7408                 if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)) {
7409                         desc = "data-check";
7410                         action = "check";
7411                 } else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
7412                         desc = "requested-resync";
7413                         action = "repair";
7414                 } else
7415                         desc = "resync";
7416         } else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
7417                 desc = "reshape";
7418         else
7419                 desc = "recovery";
7420
7421         mddev->last_sync_action = action ?: desc;
7422
7423         /* we overload curr_resync somewhat here.
7424          * 0 == not engaged in resync at all
7425          * 2 == checking that there is no conflict with another sync
7426          * 1 == like 2, but have yielded to allow conflicting resync to
7427          *              commence
7428          * other == active in resync - this many blocks
7429          *
7430          * Before starting a resync we must have set curr_resync to
7431          * 2, and then checked that every "conflicting" array has curr_resync
7432          * less than ours.  When we find one that is the same or higher
7433          * we wait on resync_wait.  To avoid deadlock, we reduce curr_resync
7434          * to 1 if we choose to yield (based arbitrarily on address of mddev structure).
7435          * This will mean we have to start checking from the beginning again.
7436          *
7437          */
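        /*
         * Example: if md0 and md1 share a physical device and both want
         * to resync, both first set curr_resync to 2; the mddev with the
         * lower address then drops to 1 and yields, sleeping on
         * resync_wait until the other array's resync has finished.
         */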
7438
7439         do {
7440                 mddev->curr_resync = 2;
7441
7442         try_again:
7443                 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
7444                         goto skip;
7445                 for_each_mddev(mddev2, tmp) {
7446                         if (mddev2 == mddev)
7447                                 continue;
7448                         if (!mddev->parallel_resync
7449                         &&  mddev2->curr_resync
7450                         &&  match_mddev_units(mddev, mddev2)) {
7451                                 DEFINE_WAIT(wq);
7452                                 if (mddev < mddev2 && mddev->curr_resync == 2) {
7453                                         /* arbitrarily yield */
7454                                         mddev->curr_resync = 1;
7455                                         wake_up(&resync_wait);
7456                                 }
7457                                 if (mddev > mddev2 && mddev->curr_resync == 1)
7458                                         /* no need to wait here, we can wait the next
7459                                          * time 'round when curr_resync == 2
7460                                          */
7461                                         continue;
7462                                 /* We need to wait 'interruptible' so as not to
7463                                  * contribute to the load average, and not to
7464                                  * be caught by 'softlockup'
7465                                  */
7466                                 prepare_to_wait(&resync_wait, &wq, TASK_INTERRUPTIBLE);
7467                                 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
7468                                     mddev2->curr_resync >= mddev->curr_resync) {
7469                                         printk(KERN_INFO "md: delaying %s of %s"
7470                                                " until %s has finished (they"
7471                                                " share one or more physical units)\n",
7472                                                desc, mdname(mddev), mdname(mddev2));
7473                                         mddev_put(mddev2);
7474                                         if (signal_pending(current))
7475                                                 flush_signals(current);
7476                                         schedule();
7477                                         finish_wait(&resync_wait, &wq);
7478                                         goto try_again;
7479                                 }
7480                                 finish_wait(&resync_wait, &wq);
7481                         }
7482                 }
7483         } while (mddev->curr_resync < 2);
7484
7485         j = 0;
7486         if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
7487                 /* resync follows the size requested by the personality,
7488                  * which defaults to physical size, but can be virtual size
7489                  */
7490                 max_sectors = mddev->resync_max_sectors;
7491                 atomic64_set(&mddev->resync_mismatches, 0);
7492                 /* we don't use the checkpoint if there's a bitmap */
7493                 if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
7494                         j = mddev->resync_min;
7495                 else if (!mddev->bitmap)
7496                         j = mddev->recovery_cp;
7497
7498         } else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
7499                 max_sectors = mddev->resync_max_sectors;
7500         else {
7501                 /* recovery follows the physical size of devices */
7502                 max_sectors = mddev->dev_sectors;
7503                 j = MaxSector;
7504                 rcu_read_lock();
7505                 rdev_for_each_rcu(rdev, mddev)
7506                         if (rdev->raid_disk >= 0 &&
7507                             !test_bit(Faulty, &rdev->flags) &&
7508                             !test_bit(In_sync, &rdev->flags) &&
7509                             rdev->recovery_offset < j)
7510                                 j = rdev->recovery_offset;
7511                 rcu_read_unlock();
7512
7513                 /* If there is a bitmap, we need to make sure all
7514                  * writes that started before we added a spare
7515                  * complete before we start doing a recovery.
7516                  * Otherwise the write might complete and (via
7517                  * bitmap_endwrite) set a bit in the bitmap after the
7518                  * recovery has checked that bit and skipped that
7519                  * region.
7520                  */
7521                 if (mddev->bitmap) {
7522                         mddev->pers->quiesce(mddev, 1);
7523                         mddev->pers->quiesce(mddev, 0);
7524                 }
7525         }
7526
7527         printk(KERN_INFO "md: %s of RAID array %s\n", desc, mdname(mddev));
7528         printk(KERN_INFO "md: minimum _guaranteed_  speed:"
7529                 " %d KB/sec/disk.\n", speed_min(mddev));
7530         printk(KERN_INFO "md: using maximum available idle IO bandwidth "
7531                "(but not more than %d KB/sec) for %s.\n",
7532                speed_max(mddev), desc);
7533
7534         is_mddev_idle(mddev, 1); /* this initializes IO event counters */
7535
7536         io_sectors = 0;
7537         for (m = 0; m < SYNC_MARKS; m++) {
7538                 mark[m] = jiffies;
7539                 mark_cnt[m] = io_sectors;
7540         }
7541         last_mark = 0;
7542         mddev->resync_mark = mark[last_mark];
7543         mddev->resync_mark_cnt = mark_cnt[last_mark];
7544
7545         /*
7546          * Tune reconstruction:
7547          */
7548         window = 32*(PAGE_SIZE/512);
7549         printk(KERN_INFO "md: using %dk window, over a total of %lluk.\n",
7550                 window/2, (unsigned long long)max_sectors/2);
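        /* With 4K pages, window = 32 * (4096/512) = 256 sectors, so the
         * message above reports a "128k window".
         */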
7551
7552         atomic_set(&mddev->recovery_active, 0);
7553         last_check = 0;
7554
7555         if (j>2) {
7556                 printk(KERN_INFO
7557                        "md: resuming %s of %s from checkpoint.\n",
7558                        desc, mdname(mddev));
7559                 mddev->curr_resync = j;
7560         } else
7561                 mddev->curr_resync = 3; /* no longer delayed */
7562         mddev->curr_resync_completed = j;
7563         sysfs_notify(&mddev->kobj, NULL, "sync_completed");
7564         md_new_event(mddev);
7565         update_time = jiffies;
7566
7567         blk_start_plug(&plug);
7568         while (j < max_sectors) {
7569                 sector_t sectors;
7570
7571                 skipped = 0;
7572
7573                 if (!test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
7574                     ((mddev->curr_resync > mddev->curr_resync_completed &&
7575                       (mddev->curr_resync - mddev->curr_resync_completed)
7576                       > (max_sectors >> 4)) ||
7577                      time_after_eq(jiffies, update_time + UPDATE_FREQUENCY) ||
7578                      (j - mddev->curr_resync_completed)*2
7579                      >= mddev->resync_max - mddev->curr_resync_completed
7580                             )) {
7581                         /* time to update curr_resync_completed */
7582                         wait_event(mddev->recovery_wait,
7583                                    atomic_read(&mddev->recovery_active) == 0);
7584                         mddev->curr_resync_completed = j;
7585                         if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) &&
7586                             j > mddev->recovery_cp)
7587                                 mddev->recovery_cp = j;
7588                         update_time = jiffies;
7589                         set_bit(MD_CHANGE_CLEAN, &mddev->flags);
7590                         sysfs_notify(&mddev->kobj, NULL, "sync_completed");
7591                 }
7592
7593                 while (j >= mddev->resync_max &&
7594                        !test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
7595                         /* As this condition is controlled by user-space,
7596                          * we can block indefinitely, so use '_interruptible'
7597                          * to avoid triggering warnings.
7598                          */
7599                         flush_signals(current); /* just in case */
7600                         wait_event_interruptible(mddev->recovery_wait,
7601                                                  mddev->resync_max > j
7602                                                  || test_bit(MD_RECOVERY_INTR,
7603                                                              &mddev->recovery));
7604                 }
7605
7606                 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
7607                         break;
7608
7609                 sectors = mddev->pers->sync_request(mddev, j, &skipped,
7610                                                   currspeed < speed_min(mddev));
7611                 if (sectors == 0) {
7612                         set_bit(MD_RECOVERY_INTR, &mddev->recovery);
7613                         break;
7614                 }
7615
7616                 if (!skipped) { /* actual IO requested */
7617                         io_sectors += sectors;
7618                         atomic_add(sectors, &mddev->recovery_active);
7619                 }
7620
7621                 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
7622                         break;
7623
7624                 j += sectors;
7625                 if (j > 2)
7626                         mddev->curr_resync = j;
7627                 mddev->curr_mark_cnt = io_sectors;
7628                 if (last_check == 0)
7629                         /* this is the earliest that rebuild will be
7630                          * visible in /proc/mdstat
7631                          */
7632                         md_new_event(mddev);
7633
7634                 if (last_check + window > io_sectors || j == max_sectors)
7635                         continue;
7636
7637                 last_check = io_sectors;
7638         repeat:
7639                 if (time_after_eq(jiffies, mark[last_mark] + SYNC_MARK_STEP )) {
7640                         /* step marks */
7641                         int next = (last_mark+1) % SYNC_MARKS;
7642
7643                         mddev->resync_mark = mark[next];
7644                         mddev->resync_mark_cnt = mark_cnt[next];
7645                         mark[next] = jiffies;
7646                         mark_cnt[next] = io_sectors - atomic_read(&mddev->recovery_active);
7647                         last_mark = next;
7648                 }
7649
7650                 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
7651                         break;
7652
7653                 /*
7654                  * this loop exits only when either we are slower than
7655                  * the 'hard' speed limit, or the system was IO-idle for
7656                  * a jiffy.
7657                  * the system might be non-idle CPU-wise, but we only care
7658                  * about not overloading the IO subsystem. (things like an
7659                  * e2fsck being done on the RAID array should execute fast)
7660                  */
7661                 cond_resched();
7662
7663                 currspeed = ((unsigned long)(io_sectors-mddev->resync_mark_cnt))/2
7664                         /((jiffies-mddev->resync_mark)/HZ +1) +1;
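                /* currspeed is in K/sec: e.g. 20480 sectors completed
                 * since a mark ~3 seconds ago gives
                 * 20480/2 / (3 + 1) + 1 = 2561 K/sec.
                 */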
7665
7666                 if (currspeed > speed_min(mddev)) {
7667                         if ((currspeed > speed_max(mddev)) ||
7668                                         !is_mddev_idle(mddev, 0)) {
7669                                 msleep(500);
7670                                 goto repeat;
7671                         }
7672                 }
7673         }
7674         printk(KERN_INFO "md: %s: %s %s.\n",mdname(mddev), desc,
7675                test_bit(MD_RECOVERY_INTR, &mddev->recovery)
7676                ? "interrupted" : "done");
7677         /*
7678          * this also signals 'finished resyncing' to md_stop
7679          */
7680         blk_finish_plug(&plug);
7681         wait_event(mddev->recovery_wait, !atomic_read(&mddev->recovery_active));
7682
7683         /* tell personality that we are finished */
7684         mddev->pers->sync_request(mddev, max_sectors, &skipped, 1);
7685
7686         if (!test_bit(MD_RECOVERY_CHECK, &mddev->recovery) &&
7687             mddev->curr_resync > 2) {
7688                 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
7689                         if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
7690                                 if (mddev->curr_resync >= mddev->recovery_cp) {
7691                                         printk(KERN_INFO
7692                                                "md: checkpointing %s of %s.\n",
7693                                                desc, mdname(mddev));
7694                                         if (test_bit(MD_RECOVERY_ERROR,
7695                                                 &mddev->recovery))
7696                                                 mddev->recovery_cp =
7697                                                         mddev->curr_resync_completed;
7698                                         else
7699                                                 mddev->recovery_cp =
7700                                                         mddev->curr_resync;
7701                                 }
7702                         } else
7703                                 mddev->recovery_cp = MaxSector;
7704                 } else {
7705                         if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery))
7706                                 mddev->curr_resync = MaxSector;
7707                         rcu_read_lock();
7708                         rdev_for_each_rcu(rdev, mddev)
7709                                 if (rdev->raid_disk >= 0 &&
7710                                     mddev->delta_disks >= 0 &&
7711                                     !test_bit(Faulty, &rdev->flags) &&
7712                                     !test_bit(In_sync, &rdev->flags) &&
7713                                     rdev->recovery_offset < mddev->curr_resync)
7714                                         rdev->recovery_offset = mddev->curr_resync;
7715                         rcu_read_unlock();
7716                 }
7717         }
7718  skip:
7719         set_bit(MD_CHANGE_DEVS, &mddev->flags);
7720
7721         if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
7722                 /* We completed so min/max setting can be forgotten if used. */
7723                 if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
7724                         mddev->resync_min = 0;
7725                 mddev->resync_max = MaxSector;
7726         } else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
7727                 mddev->resync_min = mddev->curr_resync_completed;
7728         mddev->curr_resync = 0;
7729         wake_up(&resync_wait);
7730         set_bit(MD_RECOVERY_DONE, &mddev->recovery);
7731         md_wakeup_thread(mddev->thread);
7732         return;
7733 }
7734 EXPORT_SYMBOL_GPL(md_do_sync);
7735
7736 static int remove_and_add_spares(struct mddev *mddev,
7737                                  struct md_rdev *this)
7738 {
7739         struct md_rdev *rdev;
7740         int spares = 0;
7741         int removed = 0;
7742
7743         rdev_for_each(rdev, mddev)
7744                 if ((this == NULL || rdev == this) &&
7745                     rdev->raid_disk >= 0 &&
7746                     !test_bit(Blocked, &rdev->flags) &&
7747                     (test_bit(Faulty, &rdev->flags) ||
7748                      ! test_bit(In_sync, &rdev->flags)) &&
7749                     atomic_read(&rdev->nr_pending)==0) {
7750                         if (mddev->pers->hot_remove_disk(
7751                                     mddev, rdev) == 0) {
7752                                 sysfs_unlink_rdev(mddev, rdev);
7753                                 rdev->raid_disk = -1;
7754                                 removed++;
7755                         }
7756                 }
7757         if (removed && mddev->kobj.sd)
7758                 sysfs_notify(&mddev->kobj, NULL, "degraded");
7759
7760         if (this)
7761                 goto no_add;
7762
7763         rdev_for_each(rdev, mddev) {
7764                 if (rdev->raid_disk >= 0 &&
7765                     !test_bit(In_sync, &rdev->flags) &&
7766                     !test_bit(Faulty, &rdev->flags))
7767                         spares++;
7768                 if (rdev->raid_disk >= 0)
7769                         continue;
7770                 if (test_bit(Faulty, &rdev->flags))
7771                         continue;
7772                 if (mddev->ro &&
7773                     ! (rdev->saved_raid_disk >= 0 &&
7774                        !test_bit(Bitmap_sync, &rdev->flags)))
7775                         continue;
7776
7777                 if (rdev->saved_raid_disk < 0)
7778                         rdev->recovery_offset = 0;
7779                 if (mddev->pers->
7780                     hot_add_disk(mddev, rdev) == 0) {
7781                         if (sysfs_link_rdev(mddev, rdev))
7782                                 /* failure here is OK */;
7783                         spares++;
7784                         md_new_event(mddev);
7785                         set_bit(MD_CHANGE_DEVS, &mddev->flags);
7786                 }
7787         }
7788 no_add:
7789         if (removed)
7790                 set_bit(MD_CHANGE_DEVS, &mddev->flags);
7791         return spares;
7792 }
7793
7794 /*
7795  * This routine is regularly called by all per-raid-array threads to
7796  * deal with generic issues like resync and super-block update.
7797  * Raid personalities that don't have a thread (linear/raid0) do not
7798  * need this as they never do any recovery or update the superblock.
7799  *
7800  * It does not do any resync itself, but rather "forks" off other threads
7801  * to do that as needed.
7802  * When it is determined that resync is needed, we set MD_RECOVERY_RUNNING in
7803  * "->recovery" and create a thread at ->sync_thread.
7804  * When the thread finishes it sets MD_RECOVERY_DONE
7805  * and wakes up this thread, which will reap the thread and finish up.
7806  * This thread also removes any faulty devices (with nr_pending == 0).
7807  *
7808  * The overall approach is:
7809  *  1/ if the superblock needs updating, update it.
7810  *  2/ If a recovery thread is running, don't do anything else.
7811  *  3/ If recovery has finished, clean up, possibly marking spares active.
7812  *  4/ If there are any faulty devices, remove them.
7813  *  5/ If array is degraded, try to add spare devices
7814  *  6/ If array has spares or is not in-sync, start a resync thread.
7815  */
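/*
 * Personalities that have a thread call this from that thread's main
 * loop (raid1d, raid5d, etc. do so on every wakeup); the body only runs
 * when mddev_trylock() succeeds, so a pass is simply skipped while some
 * other context holds the mddev lock.
 */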
7816 void md_check_recovery(struct mddev *mddev)
7817 {
7818         if (mddev->suspended)
7819                 return;
7820
7821         if (mddev->bitmap)
7822                 bitmap_daemon_work(mddev);
7823
7824         if (signal_pending(current)) {
7825                 if (mddev->pers->sync_request && !mddev->external) {
7826                         printk(KERN_INFO "md: %s in immediate safe mode\n",
7827                                mdname(mddev));
7828                         mddev->safemode = 2;
7829                 }
7830                 flush_signals(current);
7831         }
7832
7833         if (mddev->ro && !test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
7834                 return;
7835         if ( ! (
7836                 (mddev->flags & MD_UPDATE_SB_FLAGS & ~ (1<<MD_CHANGE_PENDING)) ||
7837                 test_bit(MD_RECOVERY_NEEDED, &mddev->recovery) ||
7838                 test_bit(MD_RECOVERY_DONE, &mddev->recovery) ||
7839                 (mddev->external == 0 && mddev->safemode == 1) ||
7840                 (mddev->safemode == 2 && ! atomic_read(&mddev->writes_pending)
7841                  && !mddev->in_sync && mddev->recovery_cp == MaxSector)
7842                 ))
7843                 return;
7844
7845         if (mddev_trylock(mddev)) {
7846                 int spares = 0;
7847
7848                 if (mddev->ro) {
7849                         /* On a read-only array we can:
7850                          * - remove failed devices
7851                          * - add already-in_sync devices if the array itself
7852                          *   is in-sync.
7853                          * As we only add devices that are already in-sync,
7854                          * we can activate the spares immediately.
7855                          */
7856                         remove_and_add_spares(mddev, NULL);
7857                         /* There is no thread, but we need to call
7858                          * ->spare_active and clear saved_raid_disk
7859                          */
7860                         set_bit(MD_RECOVERY_INTR, &mddev->recovery);
7861                         md_reap_sync_thread(mddev);
7862                         clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
7863                         goto unlock;
7864                 }
7865
7866                 if (!mddev->external) {
7867                         int did_change = 0;
7868                         spin_lock_irq(&mddev->write_lock);
7869                         if (mddev->safemode &&
7870                             !atomic_read(&mddev->writes_pending) &&
7871                             !mddev->in_sync &&
7872                             mddev->recovery_cp == MaxSector) {
7873                                 mddev->in_sync = 1;
7874                                 did_change = 1;
7875                                 set_bit(MD_CHANGE_CLEAN, &mddev->flags);
7876                         }
7877                         if (mddev->safemode == 1)
7878                                 mddev->safemode = 0;
7879                         spin_unlock_irq(&mddev->write_lock);
7880                         if (did_change)
7881                                 sysfs_notify_dirent_safe(mddev->sysfs_state);
7882                 }
7883
7884                 if (mddev->flags & MD_UPDATE_SB_FLAGS)
7885                         md_update_sb(mddev, 0);
7886
7887                 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) &&
7888                     !test_bit(MD_RECOVERY_DONE, &mddev->recovery)) {
7889                         /* resync/recovery still happening */
7890                         clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
7891                         goto unlock;
7892                 }
7893                 if (mddev->sync_thread) {
7894                         md_reap_sync_thread(mddev);
7895                         goto unlock;
7896                 }
7897                 /* Set RUNNING before clearing NEEDED to avoid
7898                  * any transients in the value of "sync_action".
7899                  */
7900                 mddev->curr_resync_completed = 0;
7901                 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
7902                 /* Clear some bits that don't mean anything, but
7903                  * might be left set
7904                  */
7905                 clear_bit(MD_RECOVERY_INTR, &mddev->recovery);
7906                 clear_bit(MD_RECOVERY_DONE, &mddev->recovery);
7907
7908                 if (!test_and_clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery) ||
7909                     test_bit(MD_RECOVERY_FROZEN, &mddev->recovery))
7910                         goto unlock;
7911                 /* no recovery is running.
7912                  * remove any failed drives, then
7913                  * add spares if possible.
7914                  * Spares are also removed and re-added, to allow
7915                  * the personality to fail the re-add.
7916                  */
7917
7918                 if (mddev->reshape_position != MaxSector) {
7919                         if (mddev->pers->check_reshape == NULL ||
7920                             mddev->pers->check_reshape(mddev) != 0)
7921                                 /* Cannot proceed */
7922                                 goto unlock;
7923                         set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
7924                         clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
7925                 } else if ((spares = remove_and_add_spares(mddev, NULL))) {
7926                         clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
7927                         clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
7928                         clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
7929                         set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
7930                 } else if (mddev->recovery_cp < MaxSector) {
7931                         set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
7932                         clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
7933                 } else if (!test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
7934                         /* nothing to be done ... */
7935                         goto unlock;
7936
7937                 if (mddev->pers->sync_request) {
7938                         if (spares) {
7939                                 /* We are adding a device or devices to an array
7940                                  * which has the bitmap stored on all devices.
7941                                  * So make sure all bitmap pages get written
7942                                  */
7943                                 bitmap_write_all(mddev->bitmap);
7944                         }
7945                         mddev->sync_thread = md_register_thread(md_do_sync,
7946                                                                 mddev,
7947                                                                 "resync");
7948                         if (!mddev->sync_thread) {
7949                                 printk(KERN_ERR "%s: could not start resync"
7950                                         " thread...\n", 
7951                                         mdname(mddev));
7952                                 /* leave the spares where they are, it shouldn't hurt */
7953                                 clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
7954                                 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
7955                                 clear_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
7956                                 clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
7957                                 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
7958                         } else
7959                                 md_wakeup_thread(mddev->sync_thread);
7960                         sysfs_notify_dirent_safe(mddev->sysfs_action);
7961                         md_new_event(mddev);
7962                 }
7963         unlock:
7964                 wake_up(&mddev->sb_wait);
7965
7966                 if (!mddev->sync_thread) {
7967                         clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
7968                         if (test_and_clear_bit(MD_RECOVERY_RECOVER,
7969                                                &mddev->recovery))
7970                                 if (mddev->sysfs_action)
7971                                         sysfs_notify_dirent_safe(mddev->sysfs_action);
7972                 }
7973                 mddev_unlock(mddev);
7974         }
7975 }
7976
7977 void md_reap_sync_thread(struct mddev *mddev)
7978 {
7979         struct md_rdev *rdev;
7980
7981         /* resync has finished, collect result */
7982         md_unregister_thread(&mddev->sync_thread);
7983         wake_up(&resync_wait);
7984         if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
7985             !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
7986                 /* success...*/
7987                 /* activate any spares */
7988                 if (mddev->pers->spare_active(mddev)) {
7989                         sysfs_notify(&mddev->kobj, NULL,
7990                                      "degraded");
7991                         set_bit(MD_CHANGE_DEVS, &mddev->flags);
7992                 }
7993         }
7994         if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
7995             mddev->pers->finish_reshape)
7996                 mddev->pers->finish_reshape(mddev);
7997
7998         /* If array is no longer degraded, then any saved_raid_disk
7999          * information must be scrapped.
8000          */
8001         if (!mddev->degraded)
8002                 rdev_for_each(rdev, mddev)
8003                         rdev->saved_raid_disk = -1;
8004
8005         md_update_sb(mddev, 1);
8006         clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
8007         clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
8008         clear_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
8009         clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
8010         clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
8011         /* flag recovery needed just to double check */
8012         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
8013         sysfs_notify_dirent_safe(mddev->sysfs_action);
8014         md_new_event(mddev);
8015         if (mddev->event_work.func)
8016                 queue_work(md_misc_wq, &mddev->event_work);
8017 }
8018
8019 void md_wait_for_blocked_rdev(struct md_rdev *rdev, struct mddev *mddev)
8020 {
8021         sysfs_notify_dirent_safe(rdev->sysfs_state);
8022         wait_event_timeout(rdev->blocked_wait,
8023                            !test_bit(Blocked, &rdev->flags) &&
8024                            !test_bit(BlockedBadBlocks, &rdev->flags),
8025                            msecs_to_jiffies(5000));
8026         rdev_dec_pending(rdev, mddev);
8027 }
8028 EXPORT_SYMBOL(md_wait_for_blocked_rdev);
8029
8030 void md_finish_reshape(struct mddev *mddev)
8031 {
8032         /* called by the personality module when reshape completes. */
8033         struct md_rdev *rdev;
8034
8035         rdev_for_each(rdev, mddev) {
8036                 if (rdev->data_offset > rdev->new_data_offset)
8037                         rdev->sectors += rdev->data_offset - rdev->new_data_offset;
8038                 else
8039                         rdev->sectors -= rdev->new_data_offset - rdev->data_offset;
8040                 rdev->data_offset = rdev->new_data_offset;
8041         }
8042 }
8043 EXPORT_SYMBOL(md_finish_reshape);
8044
8045 /* Bad block management.
8046  * We can record which blocks on each device are 'bad' and so just
8047  * fail those blocks, or that stripe, rather than the whole device.
8048  * Entries in the bad-block table are 64bits wide.  This comprises:
8049  * Length of bad-range, in sectors: 0-511 for lengths 1-512
8050  * Start of bad-range, sector offset, 54 bits (allows 8 exbibytes)
8051  *  A 'shift' can be set so that larger blocks are tracked and
8052  *  consequently larger devices can be covered.
8053  * 'Acknowledged' flag - 1 bit. - the most significant bit.
8054  *
8055  * Locking of the bad-block table uses a seqlock so md_is_badblock
8056  * might need to retry if it is very unlucky.
8057  * We will sometimes want to check for bad blocks in a bi_end_io function,
8058  * so we use the write_seqlock_irq variant.
8059  *
8060  * When looking for a bad block we specify a range and want to
8061  * know if any block in the range is bad.  So we binary-search
8062  * to the last range that starts at-or-before the given endpoint,
8063  * (or "before the sector after the target range")
8064  * then see if it ends after the given start.
8065  * We return
8066  *  0 if there are no known bad blocks in the range
8067  *  1 if there are known bad blocks which are all acknowledged
8068  * -1 if there are bad blocks which have not yet been acknowledged in metadata.
8069  * plus the start/length of the first bad section we overlap.
8070  */
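/*
 * Worked example of the encoding, using the BB_* helpers referenced
 * below: an acknowledged 16-sector bad range starting at sector 4096 is
 * stored as BB_MAKE(4096, 16, 1), i.e. (4096 << 9) | (16 - 1) | (1ULL << 63),
 * and decodes back as BB_OFFSET() == 4096, BB_LEN() == 16, BB_ACK() == 1.
 */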
8071 int md_is_badblock(struct badblocks *bb, sector_t s, int sectors,
8072                    sector_t *first_bad, int *bad_sectors)
8073 {
8074         int hi;
8075         int lo;
8076         u64 *p = bb->page;
8077         int rv;
8078         sector_t target = s + sectors;
8079         unsigned seq;
8080
8081         if (bb->shift > 0) {
8082                 /* round the start down, and the end up */
8083                 s >>= bb->shift;
8084                 target += (1<<bb->shift) - 1;
8085                 target >>= bb->shift;
8086                 sectors = target - s;
8087         }
8088         /* 'target' is now the first block after the bad range */
8089
8090 retry:
8091         seq = read_seqbegin(&bb->lock);
8092         lo = 0;
8093         rv = 0;
8094         hi = bb->count;
8095
8096         /* Binary search between lo and hi for 'target'
8097          * i.e. for the last range that starts before 'target'
8098          */
8099         /* INVARIANT: ranges before 'lo' and at-or-after 'hi'
8100          * are known not to be the last range before target.
8101          * VARIANT: hi-lo is the number of possible
8102          * ranges, and decreases until it reaches 1
8103          */
8104         while (hi - lo > 1) {
8105                 int mid = (lo + hi) / 2;
8106                 sector_t a = BB_OFFSET(p[mid]);
8107                 if (a < target)
8108                         /* This could still be the one, earlier ranges
8109                          * could not. */
8110                         lo = mid;
8111                 else
8112                         /* This and later ranges are definitely out. */
8113                         hi = mid;
8114         }
8115         /* 'lo' might be the last that started before target, but 'hi' isn't */
8116         if (hi > lo) {
8117                 /* need to check all ranges that end after 's' to see if
8118                  * any are unacknowledged.
8119                  */
8120                 while (lo >= 0 &&
8121                        BB_OFFSET(p[lo]) + BB_LEN(p[lo]) > s) {
8122                         if (BB_OFFSET(p[lo]) < target) {
8123                                 /* starts before the end, and finishes after
8124                                  * the start, so they must overlap
8125                                  */
8126                                 if (rv != -1 && BB_ACK(p[lo]))
8127                                         rv = 1;
8128                                 else
8129                                         rv = -1;
8130                                 *first_bad = BB_OFFSET(p[lo]);
8131                                 *bad_sectors = BB_LEN(p[lo]);
8132                         }
8133                         lo--;
8134                 }
8135         }
8136
8137         if (read_seqretry(&bb->lock, seq))
8138                 goto retry;
8139
8140         return rv;
8141 }
8142 EXPORT_SYMBOL_GPL(md_is_badblock);
8143
8144 /*
8145  * Add a range of bad blocks to the table.
8146  * This might extend the table, or might contract it
8147  * if two adjacent ranges can be merged.
8148  * We binary-search to find the 'insertion' point, then
8149  * decide how best to handle it.
8150  */
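/*
 * For example, given an existing acknowledged entry covering sectors
 * 100-107 (offset 100, length 8), setting bad blocks at 104-111 merges
 * into a single entry of offset 100, length 12; a non-overlapping range
 * is instead inserted as a new 64-bit entry, and the call returns 0
 * (failure) only once the table already holds MD_MAX_BADBLOCKS entries.
 */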
8151 static int md_set_badblocks(struct badblocks *bb, sector_t s, int sectors,
8152                             int acknowledged)
8153 {
8154         u64 *p;
8155         int lo, hi;
8156         int rv = 1;
8157         unsigned long flags;
8158
8159         if (bb->shift < 0)
8160                 /* badblocks are disabled */
8161                 return 0;
8162
8163         if (bb->shift) {
8164                 /* round the start down, and the end up */
8165                 sector_t next = s + sectors;
8166                 s >>= bb->shift;
8167                 next += (1<<bb->shift) - 1;
8168                 next >>= bb->shift;
8169                 sectors = next - s;
8170         }
8171
8172         write_seqlock_irqsave(&bb->lock, flags);
8173
8174         p = bb->page;
8175         lo = 0;
8176         hi = bb->count;
8177         /* Find the last range that starts at-or-before 's' */
8178         while (hi - lo > 1) {
8179                 int mid = (lo + hi) / 2;
8180                 sector_t a = BB_OFFSET(p[mid]);
8181                 if (a <= s)
8182                         lo = mid;
8183                 else
8184                         hi = mid;
8185         }
8186         if (hi > lo && BB_OFFSET(p[lo]) > s)
8187                 hi = lo;
8188
8189         if (hi > lo) {
8190                 /* we found a range that might merge with the start
8191                  * of our new range
8192                  */
8193                 sector_t a = BB_OFFSET(p[lo]);
8194                 sector_t e = a + BB_LEN(p[lo]);
8195                 int ack = BB_ACK(p[lo]);
8196                 if (e >= s) {
8197                         /* Yes, we can merge with a previous range */
8198                         if (s == a && s + sectors >= e)
8199                                 /* new range covers old */
8200                                 ack = acknowledged;
8201                         else
8202                                 ack = ack && acknowledged;
8203
8204                         if (e < s + sectors)
8205                                 e = s + sectors;
8206                         if (e - a <= BB_MAX_LEN) {
8207                                 p[lo] = BB_MAKE(a, e-a, ack);
8208                                 s = e;
8209                         } else {
8210                                 /* does not all fit in one range,
8211                                  * make p[lo] maximal
8212                                  */
8213                                 if (BB_LEN(p[lo]) != BB_MAX_LEN)
8214                                         p[lo] = BB_MAKE(a, BB_MAX_LEN, ack);
8215                                 s = a + BB_MAX_LEN;
8216                         }
8217                         sectors = e - s;
8218                 }
8219         }
8220         if (sectors && hi < bb->count) {
8221                 /* 'hi' points to the first range that starts after 's'.
8222                  * Maybe we can merge with the start of that range */
8223                 sector_t a = BB_OFFSET(p[hi]);
8224                 sector_t e = a + BB_LEN(p[hi]);
8225                 int ack = BB_ACK(p[hi]);
8226                 if (a <= s + sectors) {
8227                         /* merging is possible */
8228                         if (e <= s + sectors) {
8229                                 /* full overlap */
8230                                 e = s + sectors;
8231                                 ack = acknowledged;
8232                         } else
8233                                 ack = ack && acknowledged;
8234
8235                         a = s;
8236                         if (e - a <= BB_MAX_LEN) {
8237                                 p[hi] = BB_MAKE(a, e-a, ack);
8238                                 s = e;
8239                         } else {
8240                                 p[hi] = BB_MAKE(a, BB_MAX_LEN, ack);
8241                                 s = a + BB_MAX_LEN;
8242                         }
8243                         sectors = e - s;
8244                         lo = hi;
8245                         hi++;
8246                 }
8247         }
8248         if (sectors == 0 && hi < bb->count) {
8249                 /* we might be able to combine lo and hi */
8250                 /* Note: 's' is at the end of 'lo' */
8251                 sector_t a = BB_OFFSET(p[hi]);
8252                 int lolen = BB_LEN(p[lo]);
8253                 int hilen = BB_LEN(p[hi]);
8254                 int newlen = lolen + hilen - (s - a);
8255                 if (s >= a && newlen < BB_MAX_LEN) {
8256                         /* yes, we can combine them */
8257                         int ack = BB_ACK(p[lo]) && BB_ACK(p[hi]);
8258                         p[lo] = BB_MAKE(BB_OFFSET(p[lo]), newlen, ack);
8259                         memmove(p + hi, p + hi + 1,
8260                                 (bb->count - hi - 1) * 8);
8261                         bb->count--;
8262                 }
8263         }
8264         while (sectors) {
8265                 /* didn't merge (it all).
8266                  * Need to add a range just before 'hi' */
8267                 if (bb->count >= MD_MAX_BADBLOCKS) {
8268                         /* No room for more */
8269                         rv = 0;
8270                         break;
8271                 } else {
8272                         int this_sectors = sectors;
8273                         memmove(p + hi + 1, p + hi,
8274                                 (bb->count - hi) * 8);
8275                         bb->count++;
8276
8277                         if (this_sectors > BB_MAX_LEN)
8278                                 this_sectors = BB_MAX_LEN;
8279                         p[hi] = BB_MAKE(s, this_sectors, acknowledged);
8280                         sectors -= this_sectors;
8281                         s += this_sectors;
8282                 }
8283         }
8284
8285         bb->changed = 1;
8286         if (!acknowledged)
8287                 bb->unacked_exist = 1;
8288         write_sequnlock_irqrestore(&bb->lock, flags);
8289
8290         return rv;
8291 }
8292
8293 int rdev_set_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
8294                        int is_new)
8295 {
8296         int rv;
8297         if (is_new)
8298                 s += rdev->new_data_offset;
8299         else
8300                 s += rdev->data_offset;
8301         rv = md_set_badblocks(&rdev->badblocks,
8302                               s, sectors, 0);
8303         if (rv) {
8304                 /* Make sure they get written out promptly */
8305                 sysfs_notify_dirent_safe(rdev->sysfs_state);
8306                 set_bit(MD_CHANGE_CLEAN, &rdev->mddev->flags);
8307                 md_wakeup_thread(rdev->mddev->thread);
8308         }
8309         return rv;
8310 }
8311 EXPORT_SYMBOL_GPL(rdev_set_badblocks);
8312
8313 /*
8314  * Remove a range of bad blocks from the table.
8315  * This may involve extending the table if we split a region,
8316  * but it must not fail.  So if the table becomes full, we just
8317  * drop the remove request.
8318  */
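     /*
      * Illustrative example: if the table holds the single range
      * (start 64, len 16), i.e. sectors 64..79, then clearing sectors
      * 68..71 splits the entry into (64, len 4) and (72, len 8),
      * consuming one extra table slot.
      */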
8319 static int md_clear_badblocks(struct badblocks *bb, sector_t s, int sectors)
8320 {
8321         u64 *p;
8322         int lo, hi;
8323         sector_t target = s + sectors;
8324         int rv = 0;
8325
8326         if (bb->shift > 0) {
8327                 /* When clearing we round the start up and the end down.
8328                  * This should not matter as the shift should align with
8329                  * the block size and no rounding should ever be needed.
8330  * However it is better to think a block is bad when it
8331                  * isn't than to think a block is not bad when it is.
8332                  */
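              /* Illustrative example: with bb->shift == 3 (4KiB blocks
               * of 512-byte sectors), s == 10 and sectors == 20 give
               * target == 30; rounding yields s == 2 and target == 3 in
               * block units, so only the fully covered block (sectors
               * 16..23) is cleared.
               */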
8333                 s += (1<<bb->shift) - 1;
8334                 s >>= bb->shift;
8335                 target >>= bb->shift;
8336                 sectors = target - s;
8337         }
8338
8339         write_seqlock_irq(&bb->lock);
8340
8341         p = bb->page;
8342         lo = 0;
8343         hi = bb->count;
8344         /* Find the last range that starts before 'target' */
8345         while (hi - lo > 1) {
8346                 int mid = (lo + hi) / 2;
8347                 sector_t a = BB_OFFSET(p[mid]);
8348                 if (a < target)
8349                         lo = mid;
8350                 else
8351                         hi = mid;
8352         }
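              /* Illustrative example: with ranges starting at sectors 10,
               * 50 and 90 and target == 60, the search converges on
               * lo == 1, the range starting at 50, which is the last one
               * that starts before 'target'.
               */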
8353         if (hi > lo) {
8354                 /* p[lo] is the last range that could overlap the
8355                  * current range.  Earlier ranges could also overlap,
8356                  * but only this one can overlap the end of the range.
8357                  */
8358                 if (BB_OFFSET(p[lo]) + BB_LEN(p[lo]) > target) {
8359                         /* Partial overlap, leave the tail of this range */
8360                         int ack = BB_ACK(p[lo]);
8361                         sector_t a = BB_OFFSET(p[lo]);
8362                         sector_t end = a + BB_LEN(p[lo]);
8363
8364                         if (a < s) {
8365                                 /* we need to split this range */
8366                                 if (bb->count >= MD_MAX_BADBLOCKS) {
8367                                         rv = 0;
8368                                         goto out;
8369                                 }
8370                                 memmove(p+lo+1, p+lo, (bb->count - lo) * 8);
8371                                 bb->count++;
8372                                 p[lo] = BB_MAKE(a, s-a, ack);
8373                                 lo++;
8374                         }
8375                         p[lo] = BB_MAKE(target, end - target, ack);
8376                         /* there is no longer an overlap */
8377                         hi = lo;
8378                         lo--;
8379                 }
8380                 while (lo >= 0 &&
8381                        BB_OFFSET(p[lo]) + BB_LEN(p[lo]) > s) {
8382                         /* This range does overlap */
8383                         if (BB_OFFSET(p[lo]) < s) {
8384                                 /* Keep the early parts of this range. */
8385                                 int ack = BB_ACK(p[lo]);
8386                                 sector_t start = BB_OFFSET(p[lo]);
8387                                 p[lo] = BB_MAKE(start, s - start, ack);
8388                                 /* now p[lo] doesn't overlap, so we can stop */
8389                                 break;
8390                         }
8391                         lo--;
8392                 }
8393                 /* 'lo' is strictly before, 'hi' is strictly after,
8394                  * anything between needs to be discarded
8395                  */
8396                 if (hi - lo > 1) {
8397                         memmove(p+lo+1, p+hi, (bb->count - hi) * 8);
8398                         bb->count -= (hi - lo - 1);
8399                 }
8400         }
8401
8402         bb->changed = 1;
8403 out:
8404         write_sequnlock_irq(&bb->lock);
8405         return rv;
8406 }
8407
8408 int rdev_clear_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
8409                          int is_new)
8410 {
8411         if (is_new)
8412                 s += rdev->new_data_offset;
8413         else
8414                 s += rdev->data_offset;
8415         return md_clear_badblocks(&rdev->badblocks,
8416                                   s, sectors);
8417 }
8418 EXPORT_SYMBOL_GPL(rdev_clear_badblocks);
8419
8420 /*
8421  * Acknowledge all bad blocks in a list.
8422  * This only succeeds if ->changed is clear.  It is used by
8423  * in-kernel metadata updates.
8424  */
8425 void md_ack_all_badblocks(struct badblocks *bb)
8426 {
8427         if (bb->page == NULL || bb->changed)
8428                 /* no point even trying */
8429                 return;
8430         write_seqlock_irq(&bb->lock);
8431
8432         if (bb->changed == 0 && bb->unacked_exist) {
8433                 u64 *p = bb->page;
8434                 int i;
8435                 for (i = 0; i < bb->count; i++) {
8436                         if (!BB_ACK(p[i])) {
8437                                 sector_t start = BB_OFFSET(p[i]);
8438                                 int len = BB_LEN(p[i]);
8439                                 p[i] = BB_MAKE(start, len, 1);
8440                         }
8441                 }
8442                 bb->unacked_exist = 0;
8443         }
8444         write_sequnlock_irq(&bb->lock);
8445 }
8446 EXPORT_SYMBOL_GPL(md_ack_all_badblocks);
8447
8448 /* sysfs access to bad-blocks list.
8449  * We present two files.
8450  * 'bad-blocks' lists sector numbers and lengths of ranges that
8451  *    are recorded as bad.  The list is truncated to fit within
8452  *    the one-page limit of sysfs.
8453  *    Writing "sector length" to this file adds an acknowledged
8454  *    bad block to the list.
8455  * 'unacknowledged-bad-blocks' lists bad blocks that have not yet
8456  *    been acknowledged.  Writing to this file adds bad blocks
8457  *    without acknowledging them.  This is largely for testing.
8458  */
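     /*
      * Illustrative usage, assuming the attributes appear as "bad_blocks"
      * and "unacknowledged_bad_blocks" under the per-device md sysfs
      * directory (exact paths depend on the running kernel):
      *
      *   # echo "1024 8" > /sys/block/md0/md/dev-sda/bad_blocks
      *   # cat /sys/block/md0/md/dev-sda/bad_blocks
      *   1024 8
      *
      * The write records sectors 1024..1031 as acknowledged bad blocks;
      * the read reports them back in "sector length" form.
      */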
8459
8460 static ssize_t
8461 badblocks_show(struct badblocks *bb, char *page, int unack)
8462 {
8463         size_t len;
8464         int i;
8465         u64 *p = bb->page;
8466         unsigned seq;
8467
8468         if (bb->shift < 0)
8469                 return 0;
8470
8471 retry:
8472         seq = read_seqbegin(&bb->lock);
8473
8474         len = 0;
8475         i = 0;
8476
8477         while (len < PAGE_SIZE && i < bb->count) {
8478                 sector_t s = BB_OFFSET(p[i]);
8479                 unsigned int length = BB_LEN(p[i]);
8480                 int ack = BB_ACK(p[i]);
8481                 i++;
8482
8483                 if (unack && ack)
8484                         continue;
8485
8486                 len += snprintf(page+len, PAGE_SIZE-len, "%llu %u\n",
8487                                 (unsigned long long)s << bb->shift,
8488                                 length << bb->shift);
8489         }
8490         if (unack && len == 0)
8491                 bb->unacked_exist = 0;
8492
8493         if (read_seqretry(&bb->lock, seq))
8494                 goto retry;
8495
8496         return len;
8497 }
8498
8499 #define DO_DEBUG 1
8500
8501 static ssize_t
8502 badblocks_store(struct badblocks *bb, const char *page, size_t len, int unack)
8503 {
8504         unsigned long long sector;
8505         int length;
8506         char newline;
8507 #ifdef DO_DEBUG
8508         /* Allow clearing via sysfs *only* for testing/debugging.
8509          * Normally only a successful write may clear a badblock
8510          */
8511         int clear = 0;
8512         if (page[0] == '-') {
8513                 clear = 1;
8514                 page++;
8515         }
8516 #endif /* DO_DEBUG */
8517
8518         switch (sscanf(page, "%llu %d%c", &sector, &length, &newline)) {
8519         case 3:
8520                 if (newline != '\n')
8521                         return -EINVAL;
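                 /* fall through */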
8522         case 2:
8523                 if (length <= 0)
8524                         return -EINVAL;
8525                 break;
8526         default:
8527                 return -EINVAL;
8528         }
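              /* Illustrative inputs: "1024 8" and "1024 8\n" are accepted;
               * "1024" (missing length) and "1024 0" (non-positive length)
               * are rejected with -EINVAL.
               */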
8529
8530 #ifdef DO_DEBUG
8531         if (clear) {
8532                 md_clear_badblocks(bb, sector, length);
8533                 return len;
8534         }
8535 #endif /* DO_DEBUG */
8536         if (md_set_badblocks(bb, sector, length, !unack))
8537                 return len;
8538         else
8539                 return -ENOSPC;
8540 }
8541
8542 static int md_notify_reboot(struct notifier_block *this,
8543                             unsigned long code, void *x)
8544 {
8545         struct list_head *tmp;
8546         struct mddev *mddev;
8547         int need_delay = 0;
8548
8549         for_each_mddev(mddev, tmp) {
8550                 if (mddev_trylock(mddev)) {
8551                         if (mddev->pers)
8552                                 __md_stop_writes(mddev);
8553                         if (mddev->persistent)
8554                                 mddev->safemode = 2;
8555                         mddev_unlock(mddev);
8556                 }
8557                 need_delay = 1;
8558         }
8559         /*
8560          * Certain more exotic SCSI devices are known to be
8561          * volatile with respect to too-early system reboots.  While
8562          * the right place to handle this issue is the individual
8563          * driver, we do want to have a safe RAID driver ...
8564          */
8565         if (need_delay)
8566                 mdelay(1000);
8567
8568         return NOTIFY_DONE;
8569 }
8570
8571 static struct notifier_block md_notifier = {
8572         .notifier_call  = md_notify_reboot,
8573         .next           = NULL,
8574         .priority       = INT_MAX, /* before any real devices */
8575 };
8576
8577 static void md_geninit(void)
8578 {
8579         pr_debug("md: sizeof(mdp_super_t) = %d\n", (int)sizeof(mdp_super_t));
8580
8581         proc_create("mdstat", S_IRUGO, NULL, &md_seq_fops);
8582 }
8583
8584 static int __init md_init(void)
8585 {
8586         int ret = -ENOMEM;
8587
8588         md_wq = alloc_workqueue("md", WQ_MEM_RECLAIM, 0);
8589         if (!md_wq)
8590                 goto err_wq;
8591
8592         md_misc_wq = alloc_workqueue("md_misc", 0, 0);
8593         if (!md_misc_wq)
8594                 goto err_misc_wq;
8595
8596         if ((ret = register_blkdev(MD_MAJOR, "md")) < 0)
8597                 goto err_md;
8598
8599         if ((ret = register_blkdev(0, "mdp")) < 0)
8600                 goto err_mdp;
8601         mdp_major = ret;
8602
8603         blk_register_region(MKDEV(MD_MAJOR, 0), 1UL<<MINORBITS, THIS_MODULE,
8604                             md_probe, NULL, NULL);
8605         blk_register_region(MKDEV(mdp_major, 0), 1UL<<MINORBITS, THIS_MODULE,
8606                             md_probe, NULL, NULL);
8607
8608         register_reboot_notifier(&md_notifier);
8609         raid_table_header = register_sysctl_table(raid_root_table);
8610
8611         md_geninit();
8612         return 0;
8613
8614 err_mdp:
8615         unregister_blkdev(MD_MAJOR, "md");
8616 err_md:
8617         destroy_workqueue(md_misc_wq);
8618 err_misc_wq:
8619         destroy_workqueue(md_wq);
8620 err_wq:
8621         return ret;
8622 }
8623
8624 #ifndef MODULE
8625
8626 /*
8627  * Searches all registered partitions for autorun RAID arrays
8628  * at boot time.
8629  */
8630
8631 static LIST_HEAD(all_detected_devices);
8632 struct detected_devices_node {
8633         struct list_head list;
8634         dev_t dev;
8635 };
8636
8637 void md_autodetect_dev(dev_t dev)
8638 {
8639         struct detected_devices_node *node_detected_dev;
8640
8641         node_detected_dev = kzalloc(sizeof(*node_detected_dev), GFP_KERNEL);
8642         if (node_detected_dev) {
8643                 node_detected_dev->dev = dev;
8644                 list_add_tail(&node_detected_dev->list, &all_detected_devices);
8645         } else {
8646                 printk(KERN_CRIT "md: md_autodetect_dev: kzalloc failed, skipping dev(%d,%d)\n",
8647                         MAJOR(dev), MINOR(dev));
8648         }
8649 }
8650
8651
8652 static void autostart_arrays(int part)
8653 {
8654         struct md_rdev *rdev;
8655         struct detected_devices_node *node_detected_dev;
8656         dev_t dev;
8657         int i_scanned, i_passed;
8658
8659         i_scanned = 0;
8660         i_passed = 0;
8661
8662         printk(KERN_INFO "md: Autodetecting RAID arrays.\n");
8663
8664         while (!list_empty(&all_detected_devices) && i_scanned < INT_MAX) {
8665                 i_scanned++;
8666                 node_detected_dev = list_entry(all_detected_devices.next,
8667                                         struct detected_devices_node, list);
8668                 list_del(&node_detected_dev->list);
8669                 dev = node_detected_dev->dev;
8670                 kfree(node_detected_dev);
8671                 rdev = md_import_device(dev, 0, 90);
8672                 if (IS_ERR(rdev))
8673                         continue;
8674
8675                 if (test_bit(Faulty, &rdev->flags)) {
8676                         MD_BUG();
8677                         continue;
8678                 }
8679                 set_bit(AutoDetected, &rdev->flags);
8680                 list_add(&rdev->same_set, &pending_raid_disks);
8681                 i_passed++;
8682         }
8683
8684         printk(KERN_INFO "md: Scanned %d and added %d devices.\n",
8685                                                 i_scanned, i_passed);
8686
8687         autorun_devices(part);
8688 }
8689
8690 #endif /* !MODULE */
8691
8692 static __exit void md_exit(void)
8693 {
8694         struct mddev *mddev;
8695         struct list_head *tmp;
8696
8697         blk_unregister_region(MKDEV(MD_MAJOR, 0), 1U << MINORBITS);
8698         blk_unregister_region(MKDEV(mdp_major, 0), 1U << MINORBITS);
8699
8700         unregister_blkdev(MD_MAJOR, "md");
8701         unregister_blkdev(mdp_major, "mdp");
8702         unregister_reboot_notifier(&md_notifier);
8703         unregister_sysctl_table(raid_table_header);
8704         remove_proc_entry("mdstat", NULL);
8705         for_each_mddev(mddev, tmp) {
8706                 export_array(mddev);
8707                 mddev->hold_active = 0;
8708         }
8709         destroy_workqueue(md_misc_wq);
8710         destroy_workqueue(md_wq);
8711 }
8712
8713 subsys_initcall(md_init);
8714 module_exit(md_exit);
8715
8716 static int get_ro(char *buffer, struct kernel_param *kp)
8717 {
8718         return sprintf(buffer, "%d", start_readonly);
8719 }
8720 static int set_ro(const char *val, struct kernel_param *kp)
8721 {
8722         char *e;
8723         int num = simple_strtoul(val, &e, 10);
8724         if (*val && (*e == '\0' || *e == '\n')) {
8725                 start_readonly = num;
8726                 return 0;
8727         }
8728         return -EINVAL;
8729 }
8730
8731 module_param_call(start_ro, set_ro, get_ro, NULL, S_IRUSR|S_IWUSR);
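     /*
      * Illustrative usage, assuming the module is named md_mod: boot with
      * "md_mod.start_ro=1" on the kernel command line, or write 1 to
      * /sys/module/md_mod/parameters/start_ro at runtime, to have arrays
      * start in read-only mode until the first write.
      */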
8732 module_param(start_dirty_degraded, int, S_IRUGO|S_IWUSR);
8733
8734 module_param_call(new_array, add_named_array, NULL, NULL, S_IWUSR);
8735
8736 EXPORT_SYMBOL(register_md_personality);
8737 EXPORT_SYMBOL(unregister_md_personality);
8738 EXPORT_SYMBOL(md_error);
8739 EXPORT_SYMBOL(md_done_sync);
8740 EXPORT_SYMBOL(md_write_start);
8741 EXPORT_SYMBOL(md_write_end);
8742 EXPORT_SYMBOL(md_register_thread);
8743 EXPORT_SYMBOL(md_unregister_thread);
8744 EXPORT_SYMBOL(md_wakeup_thread);
8745 EXPORT_SYMBOL(md_check_recovery);
8746 EXPORT_SYMBOL(md_reap_sync_thread);
8747 MODULE_LICENSE("GPL");
8748 MODULE_DESCRIPTION("MD RAID framework");
8749 MODULE_ALIAS("md");
8750 MODULE_ALIAS_BLOCKDEV_MAJOR(MD_MAJOR);