md: add a mutex to synchronize idle and frozen in action_store()
[platform/kernel/linux-starfive.git] / drivers / md / md.c
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3    md.c : Multiple Devices driver for Linux
4      Copyright (C) 1998, 1999, 2000 Ingo Molnar
5
6      completely rewritten, based on the MD driver code from Marc Zyngier
7
8    Changes:
9
10    - RAID-1/RAID-5 extensions by Miguel de Icaza, Gadi Oxman, Ingo Molnar
11    - RAID-6 extensions by H. Peter Anvin <hpa@zytor.com>
12    - boot support for linear and striped mode by Harald Hoyer <HarryH@Royal.Net>
13    - kerneld support by Boris Tobotras <boris@xtalk.msk.su>
14    - kmod support by: Cyrus Durgin
15    - RAID0 bugfixes: Mark Anthony Lisher <markal@iname.com>
16    - Devfs support by Richard Gooch <rgooch@atnf.csiro.au>
17
18    - lots of fixes and improvements to the RAID1/RAID5 and generic
19      RAID code (such as request based resynchronization):
20
21      Neil Brown <neilb@cse.unsw.edu.au>.
22
23    - persistent bitmap code
24      Copyright (C) 2003-2004, Paul Clements, SteelEye Technology, Inc.
25
26
27    Errors, Warnings, etc.
28    Please use:
29      pr_crit() for error conditions that risk data loss
30      pr_err() for error conditions that are unexpected, like an IO error
31          or internal inconsistency
32    pr_warn() for error conditions that could have been predicted, like
33          adding a device to an array when it has incompatible metadata
34    pr_info() for interesting, very rare events, like an array starting
35          or stopping, or resync starting or stopping
36      pr_debug() for everything else.
37
38 */
39
40 #include <linux/sched/mm.h>
41 #include <linux/sched/signal.h>
42 #include <linux/kthread.h>
43 #include <linux/blkdev.h>
44 #include <linux/blk-integrity.h>
45 #include <linux/badblocks.h>
46 #include <linux/sysctl.h>
47 #include <linux/seq_file.h>
48 #include <linux/fs.h>
49 #include <linux/poll.h>
50 #include <linux/ctype.h>
51 #include <linux/string.h>
52 #include <linux/hdreg.h>
53 #include <linux/proc_fs.h>
54 #include <linux/random.h>
55 #include <linux/major.h>
56 #include <linux/module.h>
57 #include <linux/reboot.h>
58 #include <linux/file.h>
59 #include <linux/compat.h>
60 #include <linux/delay.h>
61 #include <linux/raid/md_p.h>
62 #include <linux/raid/md_u.h>
63 #include <linux/raid/detect.h>
64 #include <linux/slab.h>
65 #include <linux/percpu-refcount.h>
66 #include <linux/part_stat.h>
67
68 #include <trace/events/block.h>
69 #include "md.h"
70 #include "md-bitmap.h"
71 #include "md-cluster.h"
72
73 /* pers_list is a list of registered personalities protected by pers_lock. */
74 static LIST_HEAD(pers_list);
75 static DEFINE_SPINLOCK(pers_lock);
76
77 static const struct kobj_type md_ktype;
78
79 struct md_cluster_operations *md_cluster_ops;
80 EXPORT_SYMBOL(md_cluster_ops);
81 static struct module *md_cluster_mod;
82
83 static DECLARE_WAIT_QUEUE_HEAD(resync_wait);
84 static struct workqueue_struct *md_wq;
85 static struct workqueue_struct *md_misc_wq;
86 struct workqueue_struct *md_bitmap_wq;
87
88 static int remove_and_add_spares(struct mddev *mddev,
89                                  struct md_rdev *this);
90 static void mddev_detach(struct mddev *mddev);
91 static void export_rdev(struct md_rdev *rdev, struct mddev *mddev);
92 static void md_wakeup_thread_directly(struct md_thread __rcu *thread);
93
94 /*
95  * Default number of read corrections we'll attempt on an rdev
96  * before ejecting it from the array. We divide the read error
97  * count by 2 for every hour elapsed between read errors.
98  */
99 #define MD_DEFAULT_MAX_CORRECTED_READ_ERRORS 20
100 /* Default safemode delay: 200 msec */
101 #define DEFAULT_SAFEMODE_DELAY ((200 * HZ)/1000 +1)
102 /*
103  * Current RAID-1,4,5 parallel reconstruction 'guaranteed speed limit'
104  * is 1000 KB/sec, so the extra system load does not show up that much.
105  * Increase it if you want to have more _guaranteed_ speed. Note that
106  * the RAID driver will use the maximum available bandwidth if the IO
107  * subsystem is idle. There is also an 'absolute maximum' reconstruction
108  * speed limit - in case reconstruction slows down your system despite
109  * idle IO detection.
110  *
111  * you can change it via /proc/sys/dev/raid/speed_limit_min and _max.
112  * or /sys/block/mdX/md/sync_speed_{min,max}
113  */
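/*
 * Example of tuning from user space (values purely illustrative, array
 * name "md0" assumed):
 *
 *     echo 5000   > /proc/sys/dev/raid/speed_limit_min
 *     echo 100000 > /sys/block/md0/md/sync_speed_max
 *
 * A per-array sync_speed_{min,max} of 0 falls back to the system-wide
 * sysctl values, as implemented by speed_min()/speed_max() below.
 */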
114
115 static int sysctl_speed_limit_min = 1000;
116 static int sysctl_speed_limit_max = 200000;
117 static inline int speed_min(struct mddev *mddev)
118 {
119         return mddev->sync_speed_min ?
120                 mddev->sync_speed_min : sysctl_speed_limit_min;
121 }
122
123 static inline int speed_max(struct mddev *mddev)
124 {
125         return mddev->sync_speed_max ?
126                 mddev->sync_speed_max : sysctl_speed_limit_max;
127 }
128
129 static void rdev_uninit_serial(struct md_rdev *rdev)
130 {
131         if (!test_and_clear_bit(CollisionCheck, &rdev->flags))
132                 return;
133
134         kvfree(rdev->serial);
135         rdev->serial = NULL;
136 }
137
138 static void rdevs_uninit_serial(struct mddev *mddev)
139 {
140         struct md_rdev *rdev;
141
142         rdev_for_each(rdev, mddev)
143                 rdev_uninit_serial(rdev);
144 }
145
146 static int rdev_init_serial(struct md_rdev *rdev)
147 {
148         /* serial_nums equals BARRIER_BUCKETS_NR */
149         int i, serial_nums = 1 << ((PAGE_SHIFT - ilog2(sizeof(atomic_t))));
150         struct serial_in_rdev *serial = NULL;
151
152         if (test_bit(CollisionCheck, &rdev->flags))
153                 return 0;
154
155         serial = kvmalloc(sizeof(struct serial_in_rdev) * serial_nums,
156                           GFP_KERNEL);
157         if (!serial)
158                 return -ENOMEM;
159
160         for (i = 0; i < serial_nums; i++) {
161                 struct serial_in_rdev *serial_tmp = &serial[i];
162
163                 spin_lock_init(&serial_tmp->serial_lock);
164                 serial_tmp->serial_rb = RB_ROOT_CACHED;
165                 init_waitqueue_head(&serial_tmp->serial_io_wait);
166         }
167
168         rdev->serial = serial;
169         set_bit(CollisionCheck, &rdev->flags);
170
171         return 0;
172 }
173
174 static int rdevs_init_serial(struct mddev *mddev)
175 {
176         struct md_rdev *rdev;
177         int ret = 0;
178
179         rdev_for_each(rdev, mddev) {
180                 ret = rdev_init_serial(rdev);
181                 if (ret)
182                         break;
183         }
184
185         /* Free all resources if the pool does not exist */
186         if (ret && !mddev->serial_info_pool)
187                 rdevs_uninit_serial(mddev);
188
189         return ret;
190 }
191
192 /*
193  * rdev needs to enable serialization if it meets both conditions:
194  * 1. it is a multi-queue device flagged with writemostly.
195  * 2. the write-behind mode is enabled.
196  */
197 static int rdev_need_serial(struct md_rdev *rdev)
198 {
199         return (rdev && rdev->mddev->bitmap_info.max_write_behind > 0 &&
200                 rdev->bdev->bd_disk->queue->nr_hw_queues != 1 &&
201                 test_bit(WriteMostly, &rdev->flags));
202 }
203
204 /*
205  * Init resource for rdev(s), then create serial_info_pool if:
206  * 1. rdev is the first device which returns true from rdev_need_serial.
207  * 2. rdev is NULL, meaning we want to enable serialization for all rdevs.
208  */
209 void mddev_create_serial_pool(struct mddev *mddev, struct md_rdev *rdev,
210                               bool is_suspend)
211 {
212         int ret = 0;
213
214         if (rdev && !rdev_need_serial(rdev) &&
215             !test_bit(CollisionCheck, &rdev->flags))
216                 return;
217
218         if (!is_suspend)
219                 mddev_suspend(mddev);
220
221         if (!rdev)
222                 ret = rdevs_init_serial(mddev);
223         else
224                 ret = rdev_init_serial(rdev);
225         if (ret)
226                 goto abort;
227
228         if (mddev->serial_info_pool == NULL) {
229                 /*
230                  * already in memalloc noio context by
231                  * mddev_suspend()
232                  */
233                 mddev->serial_info_pool =
234                         mempool_create_kmalloc_pool(NR_SERIAL_INFOS,
235                                                 sizeof(struct serial_info));
236                 if (!mddev->serial_info_pool) {
237                         rdevs_uninit_serial(mddev);
238                         pr_err("can't alloc memory pool for serialization\n");
239                 }
240         }
241
242 abort:
243         if (!is_suspend)
244                 mddev_resume(mddev);
245 }
246
247 /*
248  * Free resource from rdev(s), and destroy serial_info_pool under conditions:
249  * 1. rdev is the last device flagged with CollisionCheck.
250  * 2. when bitmap is destroyed while policy is not enabled.
251  * 3. for disable policy, the pool is destroyed only when no rdev needs it.
252  */
253 void mddev_destroy_serial_pool(struct mddev *mddev, struct md_rdev *rdev,
254                                bool is_suspend)
255 {
256         if (rdev && !test_bit(CollisionCheck, &rdev->flags))
257                 return;
258
259         if (mddev->serial_info_pool) {
260                 struct md_rdev *temp;
261                 int num = 0; /* used to track if other rdevs need the pool */
262
263                 if (!is_suspend)
264                         mddev_suspend(mddev);
265                 rdev_for_each(temp, mddev) {
266                         if (!rdev) {
267                                 if (!mddev->serialize_policy ||
268                                     !rdev_need_serial(temp))
269                                         rdev_uninit_serial(temp);
270                                 else
271                                         num++;
272                         } else if (temp != rdev &&
273                                    test_bit(CollisionCheck, &temp->flags))
274                                 num++;
275                 }
276
277                 if (rdev)
278                         rdev_uninit_serial(rdev);
279
280                 if (num)
281                         pr_info("The mempool could be used by other devices\n");
282                 else {
283                         mempool_destroy(mddev->serial_info_pool);
284                         mddev->serial_info_pool = NULL;
285                 }
286                 if (!is_suspend)
287                         mddev_resume(mddev);
288         }
289 }
290
291 static struct ctl_table_header *raid_table_header;
292
293 static struct ctl_table raid_table[] = {
294         {
295                 .procname       = "speed_limit_min",
296                 .data           = &sysctl_speed_limit_min,
297                 .maxlen         = sizeof(int),
298                 .mode           = S_IRUGO|S_IWUSR,
299                 .proc_handler   = proc_dointvec,
300         },
301         {
302                 .procname       = "speed_limit_max",
303                 .data           = &sysctl_speed_limit_max,
304                 .maxlen         = sizeof(int),
305                 .mode           = S_IRUGO|S_IWUSR,
306                 .proc_handler   = proc_dointvec,
307         },
308         { }
309 };
310
311 static int start_readonly;
312
313 /*
314  * The original mechanism for creating an md device is to create
315  * a device node in /dev and to open it.  This causes races with device-close.
316  * The preferred method is to write to the "new_array" module parameter.
317  * This can avoid races.
318  * Setting create_on_open to false disables the original mechanism
319  * so all the races disappear.
320  */
321 static bool create_on_open = true;
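/*
 * A sketch of the preferred method described above (the array name is
 * only an example):
 *
 *     echo md_home > /sys/module/md_mod/parameters/new_array
 *
 * which creates the array without the open()-based race.
 */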
322
323 /*
324  * We have a system wide 'event count' that is incremented
325  * on any 'interesting' event, and readers of /proc/mdstat
326  * can use 'poll' or 'select' to find out when the event
327  * count increases.
328  *
329  * Events are:
330  *  start array, stop array, error, add device, remove device,
331  *  start build, activate spare
332  */
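/*
 * A user-space monitor could watch for these events roughly like this
 * (illustrative sketch, error handling omitted):
 *
 *     int fd = open("/proc/mdstat", O_RDONLY);
 *     char buf[4096];
 *     read(fd, buf, sizeof(buf));            // read the current state
 *     struct pollfd pfd = { .fd = fd, .events = POLLPRI };
 *     poll(&pfd, 1, -1);                     // wakes up on md_new_event()
 *     lseek(fd, 0, SEEK_SET);                // re-read for the new state
 */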
333 static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
334 static atomic_t md_event_count;
335 void md_new_event(void)
336 {
337         atomic_inc(&md_event_count);
338         wake_up(&md_event_waiters);
339 }
340 EXPORT_SYMBOL_GPL(md_new_event);
341
342 /*
343  * Enables to iterate over all existing md arrays
344  * all_mddevs_lock protects this list.
345  */
346 static LIST_HEAD(all_mddevs);
347 static DEFINE_SPINLOCK(all_mddevs_lock);
348
349 /* Rather than calling directly into the personality make_request function,
350  * IO requests come here first so that we can check if the device is
351  * being suspended pending a reconfiguration.
352  * We hold a refcount over the call to ->make_request.  By the time that
353  * call has finished, the bio has been linked into some internal structure
354  * and so is visible to ->quiesce(), so we don't need the refcount any more.
355  */
356 static bool is_suspended(struct mddev *mddev, struct bio *bio)
357 {
358         if (is_md_suspended(mddev))
359                 return true;
360         if (bio_data_dir(bio) != WRITE)
361                 return false;
362         if (mddev->suspend_lo >= mddev->suspend_hi)
363                 return false;
364         if (bio->bi_iter.bi_sector >= mddev->suspend_hi)
365                 return false;
366         if (bio_end_sector(bio) < mddev->suspend_lo)
367                 return false;
368         return true;
369 }
370
371 void md_handle_request(struct mddev *mddev, struct bio *bio)
372 {
373 check_suspended:
374         if (is_suspended(mddev, bio)) {
375                 DEFINE_WAIT(__wait);
376                 /* Bail out if REQ_NOWAIT is set for the bio */
377                 if (bio->bi_opf & REQ_NOWAIT) {
378                         bio_wouldblock_error(bio);
379                         return;
380                 }
381                 for (;;) {
382                         prepare_to_wait(&mddev->sb_wait, &__wait,
383                                         TASK_UNINTERRUPTIBLE);
384                         if (!is_suspended(mddev, bio))
385                                 break;
386                         schedule();
387                 }
388                 finish_wait(&mddev->sb_wait, &__wait);
389         }
390         if (!percpu_ref_tryget_live(&mddev->active_io))
391                 goto check_suspended;
392
393         if (!mddev->pers->make_request(mddev, bio)) {
394                 percpu_ref_put(&mddev->active_io);
395                 goto check_suspended;
396         }
397
398         percpu_ref_put(&mddev->active_io);
399 }
400 EXPORT_SYMBOL(md_handle_request);
401
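/*
 * md_submit_bio() is the bio entry point for all md block devices: it
 * rejects I/O to unconfigured or broken arrays, fails writes to
 * read-only arrays, splits bios that exceed the queue limits and hands
 * everything else to md_handle_request().
 */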
402 static void md_submit_bio(struct bio *bio)
403 {
404         const int rw = bio_data_dir(bio);
405         struct mddev *mddev = bio->bi_bdev->bd_disk->private_data;
406
407         if (mddev == NULL || mddev->pers == NULL) {
408                 bio_io_error(bio);
409                 return;
410         }
411
412         if (unlikely(test_bit(MD_BROKEN, &mddev->flags)) && (rw == WRITE)) {
413                 bio_io_error(bio);
414                 return;
415         }
416
417         bio = bio_split_to_limits(bio);
418         if (!bio)
419                 return;
420
421         if (mddev->ro == MD_RDONLY && unlikely(rw == WRITE)) {
422                 if (bio_sectors(bio) != 0)
423                         bio->bi_status = BLK_STS_IOERR;
424                 bio_endio(bio);
425                 return;
426         }
427
428         /* bio could become mergeable after it is passed to the underlying layer */
429         bio->bi_opf &= ~REQ_NOMERGE;
430
431         md_handle_request(mddev, bio);
432 }
433
434 /* mddev_suspend makes sure no new requests are submitted
435  * to the device, and that any requests that have been submitted
436  * are completely handled.
437  * Once mddev_detach() is called and completes, the module will be
438  * completely unused.
439  */
440 void mddev_suspend(struct mddev *mddev)
441 {
442         struct md_thread *thread = rcu_dereference_protected(mddev->thread,
443                         lockdep_is_held(&mddev->reconfig_mutex));
444
445         WARN_ON_ONCE(thread && current == thread->tsk);
446         if (mddev->suspended++)
447                 return;
448         wake_up(&mddev->sb_wait);
449         set_bit(MD_ALLOW_SB_UPDATE, &mddev->flags);
450         percpu_ref_kill(&mddev->active_io);
451
452         if (mddev->pers->prepare_suspend)
453                 mddev->pers->prepare_suspend(mddev);
454
455         wait_event(mddev->sb_wait, percpu_ref_is_zero(&mddev->active_io));
456         mddev->pers->quiesce(mddev, 1);
457         clear_bit_unlock(MD_ALLOW_SB_UPDATE, &mddev->flags);
458         wait_event(mddev->sb_wait, !test_bit(MD_UPDATING_SB, &mddev->flags));
459
460         del_timer_sync(&mddev->safemode_timer);
461         /* restrict memory reclaim I/O while the raid array is suspended */
462         mddev->noio_flag = memalloc_noio_save();
463 }
464 EXPORT_SYMBOL_GPL(mddev_suspend);
465
466 void mddev_resume(struct mddev *mddev)
467 {
468         /* entered the memalloc scope set up in mddev_suspend() */
469         memalloc_noio_restore(mddev->noio_flag);
470         lockdep_assert_held(&mddev->reconfig_mutex);
471         if (--mddev->suspended)
472                 return;
473         percpu_ref_resurrect(&mddev->active_io);
474         wake_up(&mddev->sb_wait);
475         mddev->pers->quiesce(mddev, 0);
476
477         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
478         md_wakeup_thread(mddev->thread);
479         md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */
480 }
481 EXPORT_SYMBOL_GPL(mddev_resume);
482
483 /*
484  * Generic flush handling for md
485  */
486
487 static void md_end_flush(struct bio *bio)
488 {
489         struct md_rdev *rdev = bio->bi_private;
490         struct mddev *mddev = rdev->mddev;
491
492         bio_put(bio);
493
494         rdev_dec_pending(rdev, mddev);
495
496         if (atomic_dec_and_test(&mddev->flush_pending)) {
497                 /* The pre-request flush has finished */
498                 queue_work(md_wq, &mddev->flush_work);
499         }
500 }
501
502 static void md_submit_flush_data(struct work_struct *ws);
503
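/*
 * First step of the flush sequence: send an empty REQ_PREFLUSH bio to
 * every working rdev.  flush_pending counts the outstanding bios plus
 * one for this function itself; when it drops to zero, flush_work is
 * requeued to run md_submit_flush_data().
 */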
504 static void submit_flushes(struct work_struct *ws)
505 {
506         struct mddev *mddev = container_of(ws, struct mddev, flush_work);
507         struct md_rdev *rdev;
508
509         mddev->start_flush = ktime_get_boottime();
510         INIT_WORK(&mddev->flush_work, md_submit_flush_data);
511         atomic_set(&mddev->flush_pending, 1);
512         rcu_read_lock();
513         rdev_for_each_rcu(rdev, mddev)
514                 if (rdev->raid_disk >= 0 &&
515                     !test_bit(Faulty, &rdev->flags)) {
516                         /* Take two references, one is dropped
517                          * when the request finishes, one after
518                          * we re-take the rcu_read_lock
519                          */
520                         struct bio *bi;
521                         atomic_inc(&rdev->nr_pending);
522                         atomic_inc(&rdev->nr_pending);
523                         rcu_read_unlock();
524                         bi = bio_alloc_bioset(rdev->bdev, 0,
525                                               REQ_OP_WRITE | REQ_PREFLUSH,
526                                               GFP_NOIO, &mddev->bio_set);
527                         bi->bi_end_io = md_end_flush;
528                         bi->bi_private = rdev;
529                         atomic_inc(&mddev->flush_pending);
530                         submit_bio(bi);
531                         rcu_read_lock();
532                         rdev_dec_pending(rdev, mddev);
533                 }
534         rcu_read_unlock();
535         if (atomic_dec_and_test(&mddev->flush_pending))
536                 queue_work(md_wq, &mddev->flush_work);
537 }
538
539 static void md_submit_flush_data(struct work_struct *ws)
540 {
541         struct mddev *mddev = container_of(ws, struct mddev, flush_work);
542         struct bio *bio = mddev->flush_bio;
543
544         /*
545          * must reset flush_bio before calling into md_handle_request to avoid a
546          * deadlock: other bios that already passed the md_handle_request suspend
547          * check could be waiting for this flush_bio, while the md_handle_request
548          * call below could in turn wait for those bios because of that check
549          */
550         spin_lock_irq(&mddev->lock);
551         mddev->prev_flush_start = mddev->start_flush;
552         mddev->flush_bio = NULL;
553         spin_unlock_irq(&mddev->lock);
554         wake_up(&mddev->sb_wait);
555
556         if (bio->bi_iter.bi_size == 0) {
557                 /* an empty barrier - all done */
558                 bio_endio(bio);
559         } else {
560                 bio->bi_opf &= ~REQ_PREFLUSH;
561                 md_handle_request(mddev, bio);
562         }
563 }
564
565 /*
566  * Manages consolidation of flushes and submitting any flushes needed for
567  * a bio with REQ_PREFLUSH.  Returns true if the bio is finished or is
568  * being finished in another context.  Returns false if the flushing is
569  * complete but still needs the I/O portion of the bio to be processed.
570  */
571 bool md_flush_request(struct mddev *mddev, struct bio *bio)
572 {
573         ktime_t req_start = ktime_get_boottime();
574         spin_lock_irq(&mddev->lock);
575         /* flush requests wait until ongoing flush completes,
576          * hence coalescing all the pending requests.
577          */
578         wait_event_lock_irq(mddev->sb_wait,
579                             !mddev->flush_bio ||
580                             ktime_before(req_start, mddev->prev_flush_start),
581                             mddev->lock);
582         /* new request after previous flush is completed */
583         if (ktime_after(req_start, mddev->prev_flush_start)) {
584                 WARN_ON(mddev->flush_bio);
585                 mddev->flush_bio = bio;
586                 bio = NULL;
587         }
588         spin_unlock_irq(&mddev->lock);
589
590         if (!bio) {
591                 INIT_WORK(&mddev->flush_work, submit_flushes);
592                 queue_work(md_wq, &mddev->flush_work);
593         } else {
594                 /* flush was performed for some other bio while we waited. */
595                 if (bio->bi_iter.bi_size == 0)
596                         /* an empty barrier - all done */
597                         bio_endio(bio);
598                 else {
599                         bio->bi_opf &= ~REQ_PREFLUSH;
600                         return false;
601                 }
602         }
603         return true;
604 }
605 EXPORT_SYMBOL(md_flush_request);
606
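/*
 * Take a reference on an mddev unless it is already being deleted.
 * The caller must hold all_mddevs_lock.
 */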
607 static inline struct mddev *mddev_get(struct mddev *mddev)
608 {
609         lockdep_assert_held(&all_mddevs_lock);
610
611         if (test_bit(MD_DELETED, &mddev->flags))
612                 return NULL;
613         atomic_inc(&mddev->active);
614         return mddev;
615 }
616
617 static void mddev_delayed_delete(struct work_struct *ws);
618
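/*
 * Drop a reference taken by mddev_get().  When the last reference goes
 * away and the array was never configured (and is not held active),
 * deletion is deferred to md_misc_wq via mddev_delayed_delete().
 */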
619 void mddev_put(struct mddev *mddev)
620 {
621         if (!atomic_dec_and_lock(&mddev->active, &all_mddevs_lock))
622                 return;
623         if (!mddev->raid_disks && list_empty(&mddev->disks) &&
624             mddev->ctime == 0 && !mddev->hold_active) {
625                 /* Array is not configured at all, and not held active,
626                  * so destroy it */
627                 set_bit(MD_DELETED, &mddev->flags);
628
629                 /*
630                  * Call queue_work inside the spinlock so that
631                  * flush_workqueue() after mddev_find will succeed in waiting
632                  * for the work to be done.
633                  */
634                 INIT_WORK(&mddev->del_work, mddev_delayed_delete);
635                 queue_work(md_misc_wq, &mddev->del_work);
636         }
637         spin_unlock(&all_mddevs_lock);
638 }
639
640 static void md_safemode_timeout(struct timer_list *t);
641
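/*
 * Initialise the locks, lists, timers, wait queues and default fields
 * of a freshly allocated mddev.
 */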
642 void mddev_init(struct mddev *mddev)
643 {
644         mutex_init(&mddev->open_mutex);
645         mutex_init(&mddev->reconfig_mutex);
646         mutex_init(&mddev->sync_mutex);
647         mutex_init(&mddev->bitmap_info.mutex);
648         INIT_LIST_HEAD(&mddev->disks);
649         INIT_LIST_HEAD(&mddev->all_mddevs);
650         INIT_LIST_HEAD(&mddev->deleting);
651         timer_setup(&mddev->safemode_timer, md_safemode_timeout, 0);
652         atomic_set(&mddev->active, 1);
653         atomic_set(&mddev->openers, 0);
654         spin_lock_init(&mddev->lock);
655         atomic_set(&mddev->flush_pending, 0);
656         init_waitqueue_head(&mddev->sb_wait);
657         init_waitqueue_head(&mddev->recovery_wait);
658         mddev->reshape_position = MaxSector;
659         mddev->reshape_backwards = 0;
660         mddev->last_sync_action = "none";
661         mddev->resync_min = 0;
662         mddev->resync_max = MaxSector;
663         mddev->level = LEVEL_NONE;
664 }
665 EXPORT_SYMBOL_GPL(mddev_init);
666
667 static struct mddev *mddev_find_locked(dev_t unit)
668 {
669         struct mddev *mddev;
670
671         list_for_each_entry(mddev, &all_mddevs, all_mddevs)
672                 if (mddev->unit == unit)
673                         return mddev;
674
675         return NULL;
676 }
677
678 /* find an unused unit number */
679 static dev_t mddev_alloc_unit(void)
680 {
681         static int next_minor = 512;
682         int start = next_minor;
683         bool is_free = 0;
684         dev_t dev = 0;
685
686         while (!is_free) {
687                 dev = MKDEV(MD_MAJOR, next_minor);
688                 next_minor++;
689                 if (next_minor > MINORMASK)
690                         next_minor = 0;
691                 if (next_minor == start)
692                         return 0;               /* Oh dear, all in use. */
693                 is_free = !mddev_find_locked(dev);
694         }
695
696         return dev;
697 }
698
699 static struct mddev *mddev_alloc(dev_t unit)
700 {
701         struct mddev *new;
702         int error;
703
704         if (unit && MAJOR(unit) != MD_MAJOR)
705                 unit &= ~((1 << MdpMinorShift) - 1);
706
707         new = kzalloc(sizeof(*new), GFP_KERNEL);
708         if (!new)
709                 return ERR_PTR(-ENOMEM);
710         mddev_init(new);
711
712         spin_lock(&all_mddevs_lock);
713         if (unit) {
714                 error = -EEXIST;
715                 if (mddev_find_locked(unit))
716                         goto out_free_new;
717                 new->unit = unit;
718                 if (MAJOR(unit) == MD_MAJOR)
719                         new->md_minor = MINOR(unit);
720                 else
721                         new->md_minor = MINOR(unit) >> MdpMinorShift;
722                 new->hold_active = UNTIL_IOCTL;
723         } else {
724                 error = -ENODEV;
725                 new->unit = mddev_alloc_unit();
726                 if (!new->unit)
727                         goto out_free_new;
728                 new->md_minor = MINOR(new->unit);
729                 new->hold_active = UNTIL_STOP;
730         }
731
732         list_add(&new->all_mddevs, &all_mddevs);
733         spin_unlock(&all_mddevs_lock);
734         return new;
735 out_free_new:
736         spin_unlock(&all_mddevs_lock);
737         kfree(new);
738         return ERR_PTR(error);
739 }
740
741 static void mddev_free(struct mddev *mddev)
742 {
743         spin_lock(&all_mddevs_lock);
744         list_del(&mddev->all_mddevs);
745         spin_unlock(&all_mddevs_lock);
746
747         kfree(mddev);
748 }
749
750 static const struct attribute_group md_redundancy_group;
751
752 void mddev_unlock(struct mddev *mddev)
753 {
754         struct md_rdev *rdev;
755         struct md_rdev *tmp;
756         LIST_HEAD(delete);
757
758         if (!list_empty(&mddev->deleting))
759                 list_splice_init(&mddev->deleting, &delete);
760
761         if (mddev->to_remove) {
762                 /* These cannot be removed under reconfig_mutex as
763                  * an access to the files will try to take reconfig_mutex
764                  * while holding the file unremovable, which leads to
765                  * a deadlock.
766                  * So set sysfs_active while the removal is happening,
767                  * and anything else which might set ->to_remove or may
768                  * otherwise change the sysfs namespace will fail with
769                  * -EBUSY if sysfs_active is still set.
770                  * We set sysfs_active under reconfig_mutex and elsewhere
771                  * test it under the same mutex to ensure its correct value
772                  * is seen.
773                  */
774                 const struct attribute_group *to_remove = mddev->to_remove;
775                 mddev->to_remove = NULL;
776                 mddev->sysfs_active = 1;
777                 mutex_unlock(&mddev->reconfig_mutex);
778
779                 if (mddev->kobj.sd) {
780                         if (to_remove != &md_redundancy_group)
781                                 sysfs_remove_group(&mddev->kobj, to_remove);
782                         if (mddev->pers == NULL ||
783                             mddev->pers->sync_request == NULL) {
784                                 sysfs_remove_group(&mddev->kobj, &md_redundancy_group);
785                                 if (mddev->sysfs_action)
786                                         sysfs_put(mddev->sysfs_action);
787                                 if (mddev->sysfs_completed)
788                                         sysfs_put(mddev->sysfs_completed);
789                                 if (mddev->sysfs_degraded)
790                                         sysfs_put(mddev->sysfs_degraded);
791                                 mddev->sysfs_action = NULL;
792                                 mddev->sysfs_completed = NULL;
793                                 mddev->sysfs_degraded = NULL;
794                         }
795                 }
796                 mddev->sysfs_active = 0;
797         } else
798                 mutex_unlock(&mddev->reconfig_mutex);
799
800         list_for_each_entry_safe(rdev, tmp, &delete, same_set) {
801                 list_del_init(&rdev->same_set);
802                 kobject_del(&rdev->kobj);
803                 export_rdev(rdev, mddev);
804         }
805
806         md_wakeup_thread(mddev->thread);
807         wake_up(&mddev->sb_wait);
808 }
809 EXPORT_SYMBOL_GPL(mddev_unlock);
810
811 struct md_rdev *md_find_rdev_nr_rcu(struct mddev *mddev, int nr)
812 {
813         struct md_rdev *rdev;
814
815         rdev_for_each_rcu(rdev, mddev)
816                 if (rdev->desc_nr == nr)
817                         return rdev;
818
819         return NULL;
820 }
821 EXPORT_SYMBOL_GPL(md_find_rdev_nr_rcu);
822
823 static struct md_rdev *find_rdev(struct mddev *mddev, dev_t dev)
824 {
825         struct md_rdev *rdev;
826
827         rdev_for_each(rdev, mddev)
828                 if (rdev->bdev->bd_dev == dev)
829                         return rdev;
830
831         return NULL;
832 }
833
834 struct md_rdev *md_find_rdev_rcu(struct mddev *mddev, dev_t dev)
835 {
836         struct md_rdev *rdev;
837
838         rdev_for_each_rcu(rdev, mddev)
839                 if (rdev->bdev->bd_dev == dev)
840                         return rdev;
841
842         return NULL;
843 }
844 EXPORT_SYMBOL_GPL(md_find_rdev_rcu);
845
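/*
 * Find a registered personality, matching either the numeric level or,
 * failing that, the level name in clevel.
 */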
846 static struct md_personality *find_pers(int level, char *clevel)
847 {
848         struct md_personality *pers;
849         list_for_each_entry(pers, &pers_list, list) {
850                 if (level != LEVEL_NONE && pers->level == level)
851                         return pers;
852                 if (strcmp(pers->name, clevel)==0)
853                         return pers;
854         }
855         return NULL;
856 }
857
858 /* return the offset of the super block in 512byte sectors */
859 static inline sector_t calc_dev_sboffset(struct md_rdev *rdev)
860 {
861         return MD_NEW_SIZE_SECTORS(bdev_nr_sectors(rdev->bdev));
862 }
863
864 static int alloc_disk_sb(struct md_rdev *rdev)
865 {
866         rdev->sb_page = alloc_page(GFP_KERNEL);
867         if (!rdev->sb_page)
868                 return -ENOMEM;
869         return 0;
870 }
871
872 void md_rdev_clear(struct md_rdev *rdev)
873 {
874         if (rdev->sb_page) {
875                 put_page(rdev->sb_page);
876                 rdev->sb_loaded = 0;
877                 rdev->sb_page = NULL;
878                 rdev->sb_start = 0;
879                 rdev->sectors = 0;
880         }
881         if (rdev->bb_page) {
882                 put_page(rdev->bb_page);
883                 rdev->bb_page = NULL;
884         }
885         badblocks_exit(&rdev->badblocks);
886 }
887 EXPORT_SYMBOL_GPL(md_rdev_clear);
888
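/*
 * Completion handler for superblock writes issued by md_super_write().
 * Write errors are forwarded to md_error(); for failfast writes to a
 * still-working device the superblock is marked for a rewrite.  The
 * last completed write wakes up waiters on sb_wait.
 */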
889 static void super_written(struct bio *bio)
890 {
891         struct md_rdev *rdev = bio->bi_private;
892         struct mddev *mddev = rdev->mddev;
893
894         if (bio->bi_status) {
895                 pr_err("md: %s gets error=%d\n", __func__,
896                        blk_status_to_errno(bio->bi_status));
897                 md_error(mddev, rdev);
898                 if (!test_bit(Faulty, &rdev->flags)
899                     && (bio->bi_opf & MD_FAILFAST)) {
900                         set_bit(MD_SB_NEED_REWRITE, &mddev->sb_flags);
901                         set_bit(LastDev, &rdev->flags);
902                 }
903         } else
904                 clear_bit(LastDev, &rdev->flags);
905
906         bio_put(bio);
907
908         rdev_dec_pending(rdev, mddev);
909
910         if (atomic_dec_and_test(&mddev->pending_writes))
911                 wake_up(&mddev->sb_wait);
912 }
913
914 void md_super_write(struct mddev *mddev, struct md_rdev *rdev,
915                    sector_t sector, int size, struct page *page)
916 {
917         /* write first size bytes of page to sector of rdev
918          * Increment mddev->pending_writes before returning
919          * and decrement it on completion, waking up sb_wait
920          * if zero is reached.
921          * If an error occurred, call md_error
922          */
923         struct bio *bio;
924
925         if (!page)
926                 return;
927
928         if (test_bit(Faulty, &rdev->flags))
929                 return;
930
931         bio = bio_alloc_bioset(rdev->meta_bdev ? rdev->meta_bdev : rdev->bdev,
932                                1,
933                                REQ_OP_WRITE | REQ_SYNC | REQ_PREFLUSH | REQ_FUA,
934                                GFP_NOIO, &mddev->sync_set);
935
936         atomic_inc(&rdev->nr_pending);
937
938         bio->bi_iter.bi_sector = sector;
939         __bio_add_page(bio, page, size, 0);
940         bio->bi_private = rdev;
941         bio->bi_end_io = super_written;
942
943         if (test_bit(MD_FAILFAST_SUPPORTED, &mddev->flags) &&
944             test_bit(FailFast, &rdev->flags) &&
945             !test_bit(LastDev, &rdev->flags))
946                 bio->bi_opf |= MD_FAILFAST;
947
948         atomic_inc(&mddev->pending_writes);
949         submit_bio(bio);
950 }
951
952 int md_super_wait(struct mddev *mddev)
953 {
954         /* wait for all superblock writes that were scheduled to complete */
955         wait_event(mddev->sb_wait, atomic_read(&mddev->pending_writes)==0);
956         if (test_and_clear_bit(MD_SB_NEED_REWRITE, &mddev->sb_flags))
957                 return -EAGAIN;
958         return 0;
959 }
960
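/*
 * Synchronously read or write one page of data at @sector on @rdev.
 * For metadata operations the sector is relative to sb_start (and the
 * separate metadata device is used, if present); otherwise it is
 * relative to data_offset or new_data_offset, depending on reshape
 * progress.  Returns 1 on success, 0 on I/O error.
 */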
961 int sync_page_io(struct md_rdev *rdev, sector_t sector, int size,
962                  struct page *page, blk_opf_t opf, bool metadata_op)
963 {
964         struct bio bio;
965         struct bio_vec bvec;
966
967         if (metadata_op && rdev->meta_bdev)
968                 bio_init(&bio, rdev->meta_bdev, &bvec, 1, opf);
969         else
970                 bio_init(&bio, rdev->bdev, &bvec, 1, opf);
971
972         if (metadata_op)
973                 bio.bi_iter.bi_sector = sector + rdev->sb_start;
974         else if (rdev->mddev->reshape_position != MaxSector &&
975                  (rdev->mddev->reshape_backwards ==
976                   (sector >= rdev->mddev->reshape_position)))
977                 bio.bi_iter.bi_sector = sector + rdev->new_data_offset;
978         else
979                 bio.bi_iter.bi_sector = sector + rdev->data_offset;
980         __bio_add_page(&bio, page, size, 0);
981
982         submit_bio_wait(&bio);
983
984         return !bio.bi_status;
985 }
986 EXPORT_SYMBOL_GPL(sync_page_io);
987
988 static int read_disk_sb(struct md_rdev *rdev, int size)
989 {
990         if (rdev->sb_loaded)
991                 return 0;
992
993         if (!sync_page_io(rdev, 0, size, rdev->sb_page, REQ_OP_READ, true))
994                 goto fail;
995         rdev->sb_loaded = 1;
996         return 0;
997
998 fail:
999         pr_err("md: disabled device %pg, could not read superblock.\n",
1000                rdev->bdev);
1001         return -EINVAL;
1002 }
1003
1004 static int md_uuid_equal(mdp_super_t *sb1, mdp_super_t *sb2)
1005 {
1006         return  sb1->set_uuid0 == sb2->set_uuid0 &&
1007                 sb1->set_uuid1 == sb2->set_uuid1 &&
1008                 sb1->set_uuid2 == sb2->set_uuid2 &&
1009                 sb1->set_uuid3 == sb2->set_uuid3;
1010 }
1011
1012 static int md_sb_equal(mdp_super_t *sb1, mdp_super_t *sb2)
1013 {
1014         int ret;
1015         mdp_super_t *tmp1, *tmp2;
1016
1017         tmp1 = kmalloc(sizeof(*tmp1),GFP_KERNEL);
1018         tmp2 = kmalloc(sizeof(*tmp2),GFP_KERNEL);
1019
1020         if (!tmp1 || !tmp2) {
1021                 ret = 0;
1022                 goto abort;
1023         }
1024
1025         *tmp1 = *sb1;
1026         *tmp2 = *sb2;
1027
1028         /*
1029          * nr_disks is not constant
1030          */
1031         tmp1->nr_disks = 0;
1032         tmp2->nr_disks = 0;
1033
1034         ret = (memcmp(tmp1, tmp2, MD_SB_GENERIC_CONSTANT_WORDS * 4) == 0);
1035 abort:
1036         kfree(tmp1);
1037         kfree(tmp2);
1038         return ret;
1039 }
1040
1041 static u32 md_csum_fold(u32 csum)
1042 {
1043         csum = (csum & 0xffff) + (csum >> 16);
1044         return (csum & 0xffff) + (csum >> 16);
1045 }
1046
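/*
 * Checksum the 0.90 superblock: sum it as 32-bit words with sb_csum
 * treated as zero, fold the 64-bit total down to 32 bits, and restore
 * the on-disk csum field (folded to 16 bits on Alpha for historical
 * compatibility) before returning the computed value.
 */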
1047 static unsigned int calc_sb_csum(mdp_super_t *sb)
1048 {
1049         u64 newcsum = 0;
1050         u32 *sb32 = (u32*)sb;
1051         int i;
1052         unsigned int disk_csum, csum;
1053
1054         disk_csum = sb->sb_csum;
1055         sb->sb_csum = 0;
1056
1057         for (i = 0; i < MD_SB_BYTES/4 ; i++)
1058                 newcsum += sb32[i];
1059         csum = (newcsum & 0xffffffff) + (newcsum>>32);
1060
1061 #ifdef CONFIG_ALPHA
1062         /* This used to use csum_partial, which was wrong for several
1063          * reasons including that different results are returned on
1064          * different architectures.  It isn't critical that we get exactly
1065          * the same return value as before (we always csum_fold before
1066          * testing, and that removes any differences).  However as we
1067          * know that csum_partial always returned a 16bit value on
1068          * alphas, do a fold to maximise conformity to previous behaviour.
1069          */
1070         sb->sb_csum = md_csum_fold(disk_csum);
1071 #else
1072         sb->sb_csum = disk_csum;
1073 #endif
1074         return csum;
1075 }
1076
1077 /*
1078  * Handle superblock details.
1079  * We want to be able to handle multiple superblock formats
1080  * so we have a common interface to them all, and an array of
1081  * different handlers.
1082  * We rely on user-space to write the initial superblock, and support
1083  * reading and updating of superblocks.
1084  * Interface methods are:
1085  *   int load_super(struct md_rdev *dev, struct md_rdev *refdev, int minor_version)
1086  *      loads and validates a superblock on dev.
1087  *      if refdev != NULL, compare superblocks on both devices
1088  *    Return:
1089  *      0 - dev has a superblock that is compatible with refdev
1090  *      1 - dev has a superblock that is compatible and newer than refdev
1091  *          so dev should be used as the refdev in future
1092  *     -EINVAL superblock incompatible or invalid
1093  *     -othererror e.g. -EIO
1094  *
1095  *   int validate_super(struct mddev *mddev, struct md_rdev *dev)
1096  *      Verify that dev is acceptable into mddev.
1097  *       The first time, mddev->raid_disks will be 0, and data from
1098  *       dev should be merged in.  Subsequent calls check that dev
1099  *       is new enough.  Return 0 or -EINVAL
1100  *
1101  *   void sync_super(struct mddev *mddev, struct md_rdev *dev)
1102  *     Update the superblock for rdev with data in mddev
1103  *     This does not write to disc.
1104  *
1105  */
1106
1107 struct super_type  {
1108         char                *name;
1109         struct module       *owner;
1110         int                 (*load_super)(struct md_rdev *rdev,
1111                                           struct md_rdev *refdev,
1112                                           int minor_version);
1113         int                 (*validate_super)(struct mddev *mddev,
1114                                               struct md_rdev *rdev);
1115         void                (*sync_super)(struct mddev *mddev,
1116                                           struct md_rdev *rdev);
1117         unsigned long long  (*rdev_size_change)(struct md_rdev *rdev,
1118                                                 sector_t num_sectors);
1119         int                 (*allow_new_offset)(struct md_rdev *rdev,
1120                                                 unsigned long long new_offset);
1121 };
1122
1123 /*
1124  * Check that the given mddev has no bitmap.
1125  *
1126  * This function is called from the run method of all personalities that do not
1127  * support bitmaps. It prints an error message and returns non-zero if mddev
1128  * has a bitmap. Otherwise, it returns 0.
1129  *
1130  */
1131 int md_check_no_bitmap(struct mddev *mddev)
1132 {
1133         if (!mddev->bitmap_info.file && !mddev->bitmap_info.offset)
1134                 return 0;
1135         pr_warn("%s: bitmaps are not supported for %s\n",
1136                 mdname(mddev), mddev->pers->name);
1137         return 1;
1138 }
1139 EXPORT_SYMBOL(md_check_no_bitmap);
1140
1141 /*
1142  * load_super for 0.90.0
1143  */
1144 static int super_90_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_version)
1145 {
1146         mdp_super_t *sb;
1147         int ret;
1148         bool spare_disk = true;
1149
1150         /*
1151          * Calculate the position of the superblock (512byte sectors),
1152          * it's at the end of the disk.
1153          *
1154          * It also happens to be a multiple of 4KB.
1155          */
1156         rdev->sb_start = calc_dev_sboffset(rdev);
1157
1158         ret = read_disk_sb(rdev, MD_SB_BYTES);
1159         if (ret)
1160                 return ret;
1161
1162         ret = -EINVAL;
1163
1164         sb = page_address(rdev->sb_page);
1165
1166         if (sb->md_magic != MD_SB_MAGIC) {
1167                 pr_warn("md: invalid raid superblock magic on %pg\n",
1168                         rdev->bdev);
1169                 goto abort;
1170         }
1171
1172         if (sb->major_version != 0 ||
1173             sb->minor_version < 90 ||
1174             sb->minor_version > 91) {
1175                 pr_warn("Bad version number %d.%d on %pg\n",
1176                         sb->major_version, sb->minor_version, rdev->bdev);
1177                 goto abort;
1178         }
1179
1180         if (sb->raid_disks <= 0)
1181                 goto abort;
1182
1183         if (md_csum_fold(calc_sb_csum(sb)) != md_csum_fold(sb->sb_csum)) {
1184                 pr_warn("md: invalid superblock checksum on %pg\n", rdev->bdev);
1185                 goto abort;
1186         }
1187
1188         rdev->preferred_minor = sb->md_minor;
1189         rdev->data_offset = 0;
1190         rdev->new_data_offset = 0;
1191         rdev->sb_size = MD_SB_BYTES;
1192         rdev->badblocks.shift = -1;
1193
1194         if (sb->level == LEVEL_MULTIPATH)
1195                 rdev->desc_nr = -1;
1196         else
1197                 rdev->desc_nr = sb->this_disk.number;
1198
1199         /* not spare disk, or LEVEL_MULTIPATH */
1200         if (sb->level == LEVEL_MULTIPATH ||
1201                 (rdev->desc_nr >= 0 &&
1202                  rdev->desc_nr < MD_SB_DISKS &&
1203                  sb->disks[rdev->desc_nr].state &
1204                  ((1<<MD_DISK_SYNC) | (1 << MD_DISK_ACTIVE))))
1205                 spare_disk = false;
1206
1207         if (!refdev) {
1208                 if (!spare_disk)
1209                         ret = 1;
1210                 else
1211                         ret = 0;
1212         } else {
1213                 __u64 ev1, ev2;
1214                 mdp_super_t *refsb = page_address(refdev->sb_page);
1215                 if (!md_uuid_equal(refsb, sb)) {
1216                         pr_warn("md: %pg has different UUID to %pg\n",
1217                                 rdev->bdev, refdev->bdev);
1218                         goto abort;
1219                 }
1220                 if (!md_sb_equal(refsb, sb)) {
1221                         pr_warn("md: %pg has same UUID but different superblock to %pg\n",
1222                                 rdev->bdev, refdev->bdev);
1223                         goto abort;
1224                 }
1225                 ev1 = md_event(sb);
1226                 ev2 = md_event(refsb);
1227
1228                 if (!spare_disk && ev1 > ev2)
1229                         ret = 1;
1230                 else
1231                         ret = 0;
1232         }
1233         rdev->sectors = rdev->sb_start;
1234         /* Limit to 4TB as metadata cannot record more than that.
1235          * (not needed for Linear and RAID0 as metadata doesn't
1236          * record this size)
1237          */
1238         if ((u64)rdev->sectors >= (2ULL << 32) && sb->level >= 1)
1239                 rdev->sectors = (sector_t)(2ULL << 32) - 2;
1240
1241         if (rdev->sectors < ((sector_t)sb->size) * 2 && sb->level >= 1)
1242                 /* "this cannot possibly happen" ... */
1243                 ret = -EINVAL;
1244
1245  abort:
1246         return ret;
1247 }
1248
1249 /*
1250  * validate_super for 0.90.0
1251  */
1252 static int super_90_validate(struct mddev *mddev, struct md_rdev *rdev)
1253 {
1254         mdp_disk_t *desc;
1255         mdp_super_t *sb = page_address(rdev->sb_page);
1256         __u64 ev1 = md_event(sb);
1257
1258         rdev->raid_disk = -1;
1259         clear_bit(Faulty, &rdev->flags);
1260         clear_bit(In_sync, &rdev->flags);
1261         clear_bit(Bitmap_sync, &rdev->flags);
1262         clear_bit(WriteMostly, &rdev->flags);
1263
1264         if (mddev->raid_disks == 0) {
1265                 mddev->major_version = 0;
1266                 mddev->minor_version = sb->minor_version;
1267                 mddev->patch_version = sb->patch_version;
1268                 mddev->external = 0;
1269                 mddev->chunk_sectors = sb->chunk_size >> 9;
1270                 mddev->ctime = sb->ctime;
1271                 mddev->utime = sb->utime;
1272                 mddev->level = sb->level;
1273                 mddev->clevel[0] = 0;
1274                 mddev->layout = sb->layout;
1275                 mddev->raid_disks = sb->raid_disks;
1276                 mddev->dev_sectors = ((sector_t)sb->size) * 2;
1277                 mddev->events = ev1;
1278                 mddev->bitmap_info.offset = 0;
1279                 mddev->bitmap_info.space = 0;
1280                 /* bitmap can use 60 K after the 4K superblocks */
1281                 mddev->bitmap_info.default_offset = MD_SB_BYTES >> 9;
1282                 mddev->bitmap_info.default_space = 64*2 - (MD_SB_BYTES >> 9);
1283                 mddev->reshape_backwards = 0;
1284
1285                 if (mddev->minor_version >= 91) {
1286                         mddev->reshape_position = sb->reshape_position;
1287                         mddev->delta_disks = sb->delta_disks;
1288                         mddev->new_level = sb->new_level;
1289                         mddev->new_layout = sb->new_layout;
1290                         mddev->new_chunk_sectors = sb->new_chunk >> 9;
1291                         if (mddev->delta_disks < 0)
1292                                 mddev->reshape_backwards = 1;
1293                 } else {
1294                         mddev->reshape_position = MaxSector;
1295                         mddev->delta_disks = 0;
1296                         mddev->new_level = mddev->level;
1297                         mddev->new_layout = mddev->layout;
1298                         mddev->new_chunk_sectors = mddev->chunk_sectors;
1299                 }
1300                 if (mddev->level == 0)
1301                         mddev->layout = -1;
1302
1303                 if (sb->state & (1<<MD_SB_CLEAN))
1304                         mddev->recovery_cp = MaxSector;
1305                 else {
1306                         if (sb->events_hi == sb->cp_events_hi &&
1307                                 sb->events_lo == sb->cp_events_lo) {
1308                                 mddev->recovery_cp = sb->recovery_cp;
1309                         } else
1310                                 mddev->recovery_cp = 0;
1311                 }
1312
1313                 memcpy(mddev->uuid+0, &sb->set_uuid0, 4);
1314                 memcpy(mddev->uuid+4, &sb->set_uuid1, 4);
1315                 memcpy(mddev->uuid+8, &sb->set_uuid2, 4);
1316                 memcpy(mddev->uuid+12,&sb->set_uuid3, 4);
1317
1318                 mddev->max_disks = MD_SB_DISKS;
1319
1320                 if (sb->state & (1<<MD_SB_BITMAP_PRESENT) &&
1321                     mddev->bitmap_info.file == NULL) {
1322                         mddev->bitmap_info.offset =
1323                                 mddev->bitmap_info.default_offset;
1324                         mddev->bitmap_info.space =
1325                                 mddev->bitmap_info.default_space;
1326                 }
1327
1328         } else if (mddev->pers == NULL) {
1329                 /* Insist on good event counter while assembling, except
1330                  * for spares (which don't need an event count) */
1331                 ++ev1;
1332                 if (sb->disks[rdev->desc_nr].state & (
1333                             (1<<MD_DISK_SYNC) | (1 << MD_DISK_ACTIVE)))
1334                         if (ev1 < mddev->events)
1335                                 return -EINVAL;
1336         } else if (mddev->bitmap) {
1337                 /* if adding to array with a bitmap, then we can accept an
1338                  * older device ... but not too old.
1339                  */
1340                 if (ev1 < mddev->bitmap->events_cleared)
1341                         return 0;
1342                 if (ev1 < mddev->events)
1343                         set_bit(Bitmap_sync, &rdev->flags);
1344         } else {
1345                 if (ev1 < mddev->events)
1346                         /* just a hot-add of a new device, leave raid_disk at -1 */
1347                         return 0;
1348         }
1349
1350         if (mddev->level != LEVEL_MULTIPATH) {
1351                 desc = sb->disks + rdev->desc_nr;
1352
1353                 if (desc->state & (1<<MD_DISK_FAULTY))
1354                         set_bit(Faulty, &rdev->flags);
1355                 else if (desc->state & (1<<MD_DISK_SYNC) /* &&
1356                             desc->raid_disk < mddev->raid_disks */) {
1357                         set_bit(In_sync, &rdev->flags);
1358                         rdev->raid_disk = desc->raid_disk;
1359                         rdev->saved_raid_disk = desc->raid_disk;
1360                 } else if (desc->state & (1<<MD_DISK_ACTIVE)) {
1361                         /* active but not in sync implies recovery up to
1362                          * reshape position.  We don't know exactly where
1363                          * that is, so set to zero for now */
1364                         if (mddev->minor_version >= 91) {
1365                                 rdev->recovery_offset = 0;
1366                                 rdev->raid_disk = desc->raid_disk;
1367                         }
1368                 }
1369                 if (desc->state & (1<<MD_DISK_WRITEMOSTLY))
1370                         set_bit(WriteMostly, &rdev->flags);
1371                 if (desc->state & (1<<MD_DISK_FAILFAST))
1372                         set_bit(FailFast, &rdev->flags);
1373         } else /* MULTIPATH are always insync */
1374                 set_bit(In_sync, &rdev->flags);
1375         return 0;
1376 }
1377
1378 /*
1379  * sync_super for 0.90.0
1380  */
1381 static void super_90_sync(struct mddev *mddev, struct md_rdev *rdev)
1382 {
1383         mdp_super_t *sb;
1384         struct md_rdev *rdev2;
1385         int next_spare = mddev->raid_disks;
1386
1387         /* make rdev->sb match mddev data..
1388          *
1389          * 1/ zero out disks
1390          * 2/ Add info for each disk, keeping track of highest desc_nr (next_spare);
1391          * 3/ any empty disks < next_spare become removed
1392          *
1393          * disks[0] gets initialised to REMOVED because
1394          * we cannot be sure from other fields if it has
1395          * been initialised or not.
1396          */
1397         int i;
1398         int active=0, working=0,failed=0,spare=0,nr_disks=0;
1399
1400         rdev->sb_size = MD_SB_BYTES;
1401
1402         sb = page_address(rdev->sb_page);
1403
1404         memset(sb, 0, sizeof(*sb));
1405
1406         sb->md_magic = MD_SB_MAGIC;
1407         sb->major_version = mddev->major_version;
1408         sb->patch_version = mddev->patch_version;
1409         sb->gvalid_words  = 0; /* ignored */
1410         memcpy(&sb->set_uuid0, mddev->uuid+0, 4);
1411         memcpy(&sb->set_uuid1, mddev->uuid+4, 4);
1412         memcpy(&sb->set_uuid2, mddev->uuid+8, 4);
1413         memcpy(&sb->set_uuid3, mddev->uuid+12,4);
1414
1415         sb->ctime = clamp_t(time64_t, mddev->ctime, 0, U32_MAX);
1416         sb->level = mddev->level;
1417         sb->size = mddev->dev_sectors / 2;
1418         sb->raid_disks = mddev->raid_disks;
1419         sb->md_minor = mddev->md_minor;
1420         sb->not_persistent = 0;
1421         sb->utime = clamp_t(time64_t, mddev->utime, 0, U32_MAX);
1422         sb->state = 0;
1423         sb->events_hi = (mddev->events>>32);
1424         sb->events_lo = (u32)mddev->events;
1425
1426         if (mddev->reshape_position == MaxSector)
1427                 sb->minor_version = 90;
1428         else {
1429                 sb->minor_version = 91;
1430                 sb->reshape_position = mddev->reshape_position;
1431                 sb->new_level = mddev->new_level;
1432                 sb->delta_disks = mddev->delta_disks;
1433                 sb->new_layout = mddev->new_layout;
1434                 sb->new_chunk = mddev->new_chunk_sectors << 9;
1435         }
1436         mddev->minor_version = sb->minor_version;
1437         if (mddev->in_sync)
1438         {
1439                 sb->recovery_cp = mddev->recovery_cp;
1440                 sb->cp_events_hi = (mddev->events>>32);
1441                 sb->cp_events_lo = (u32)mddev->events;
1442                 if (mddev->recovery_cp == MaxSector)
1443                         sb->state = (1<< MD_SB_CLEAN);
1444         } else
1445                 sb->recovery_cp = 0;
1446
1447         sb->layout = mddev->layout;
1448         sb->chunk_size = mddev->chunk_sectors << 9;
1449
1450         if (mddev->bitmap && mddev->bitmap_info.file == NULL)
1451                 sb->state |= (1<<MD_SB_BITMAP_PRESENT);
1452
1453         sb->disks[0].state = (1<<MD_DISK_REMOVED);
1454         rdev_for_each(rdev2, mddev) {
1455                 mdp_disk_t *d;
1456                 int desc_nr;
1457                 int is_active = test_bit(In_sync, &rdev2->flags);
1458
1459                 if (rdev2->raid_disk >= 0 &&
1460                     sb->minor_version >= 91)
1461                         /* we have nowhere to store the recovery_offset,
1462                          * but if it is not below the reshape_position,
1463                          * we can piggy-back on that.
1464                          */
1465                         is_active = 1;
1466                 if (rdev2->raid_disk < 0 ||
1467                     test_bit(Faulty, &rdev2->flags))
1468                         is_active = 0;
1469                 if (is_active)
1470                         desc_nr = rdev2->raid_disk;
1471                 else
1472                         desc_nr = next_spare++;
1473                 rdev2->desc_nr = desc_nr;
1474                 d = &sb->disks[rdev2->desc_nr];
1475                 nr_disks++;
1476                 d->number = rdev2->desc_nr;
1477                 d->major = MAJOR(rdev2->bdev->bd_dev);
1478                 d->minor = MINOR(rdev2->bdev->bd_dev);
1479                 if (is_active)
1480                         d->raid_disk = rdev2->raid_disk;
1481                 else
1482                         d->raid_disk = rdev2->desc_nr; /* compatibility */
1483                 if (test_bit(Faulty, &rdev2->flags))
1484                         d->state = (1<<MD_DISK_FAULTY);
1485                 else if (is_active) {
1486                         d->state = (1<<MD_DISK_ACTIVE);
1487                         if (test_bit(In_sync, &rdev2->flags))
1488                                 d->state |= (1<<MD_DISK_SYNC);
1489                         active++;
1490                         working++;
1491                 } else {
1492                         d->state = 0;
1493                         spare++;
1494                         working++;
1495                 }
1496                 if (test_bit(WriteMostly, &rdev2->flags))
1497                         d->state |= (1<<MD_DISK_WRITEMOSTLY);
1498                 if (test_bit(FailFast, &rdev2->flags))
1499                         d->state |= (1<<MD_DISK_FAILFAST);
1500         }
1501         /* now set the "removed" and "faulty" bits on any missing devices */
1502         for (i=0 ; i < mddev->raid_disks ; i++) {
1503                 mdp_disk_t *d = &sb->disks[i];
1504                 if (d->state == 0 && d->number == 0) {
1505                         d->number = i;
1506                         d->raid_disk = i;
1507                         d->state = (1<<MD_DISK_REMOVED);
1508                         d->state |= (1<<MD_DISK_FAULTY);
1509                         failed++;
1510                 }
1511         }
1512         sb->nr_disks = nr_disks;
1513         sb->active_disks = active;
1514         sb->working_disks = working;
1515         sb->failed_disks = failed;
1516         sb->spare_disks = spare;
1517
1518         sb->this_disk = sb->disks[rdev->desc_nr];
1519         sb->sb_csum = calc_sb_csum(sb);
1520 }
1521
1522 /*
1523  * rdev_size_change for 0.90.0
1524  */
1525 static unsigned long long
1526 super_90_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors)
1527 {
1528         if (num_sectors && num_sectors < rdev->mddev->dev_sectors)
1529                 return 0; /* component must fit device */
1530         if (rdev->mddev->bitmap_info.offset)
1531                 return 0; /* can't move bitmap */
1532         rdev->sb_start = calc_dev_sboffset(rdev);
1533         if (!num_sectors || num_sectors > rdev->sb_start)
1534                 num_sectors = rdev->sb_start;
1535         /* Limit to 4TB as metadata cannot record more than that.
1536          * 4TB == 2^32 KB, or 2*2^32 sectors.
1537          */
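        /* Worked example (editorial, values follow from the clamp below):
         * sb->size is a 32-bit count of 1K blocks, so a 6TB component is
         * clamped to (2^33 - 2) sectors, i.e. 4TiB minus 1KiB.
         */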
1538         if ((u64)num_sectors >= (2ULL << 32) && rdev->mddev->level >= 1)
1539                 num_sectors = (sector_t)(2ULL << 32) - 2;
1540         do {
1541                 md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
1542                        rdev->sb_page);
1543         } while (md_super_wait(rdev->mddev) < 0);
1544         return num_sectors;
1545 }
1546
1547 static int
1548 super_90_allow_new_offset(struct md_rdev *rdev, unsigned long long new_offset)
1549 {
1550         /* non-zero offset changes not possible with v0.90 */
1551         return new_offset == 0;
1552 }
1553
1554 /*
1555  * version 1 superblock
1556  */
1557
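/*
 * Note on calc_sb_1_csum(): the first 256 + 2*max_dev bytes of the superblock
 * are summed as little-endian 32-bit words (sb_csum treated as zero, a
 * trailing 2 bytes added as a 16-bit word), and the 64-bit sum is folded
 * once: csum = (sum & 0xffffffff) + (sum >> 32).  A userspace checker would
 * reproduce the same fold; minimal sketch with hypothetical names:
 *
 *	u64 sum = 0;
 *	for (i = 0; i < nwords; i++)
 *		sum += le32_to_cpu(words[i]);
 *	csum = (u32)((sum & 0xffffffff) + (sum >> 32));
 */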
1558 static __le32 calc_sb_1_csum(struct mdp_superblock_1 *sb)
1559 {
1560         __le32 disk_csum;
1561         u32 csum;
1562         unsigned long long newcsum;
1563         int size = 256 + le32_to_cpu(sb->max_dev)*2;
1564         __le32 *isuper = (__le32*)sb;
1565
1566         disk_csum = sb->sb_csum;
1567         sb->sb_csum = 0;
1568         newcsum = 0;
1569         for (; size >= 4; size -= 4)
1570                 newcsum += le32_to_cpu(*isuper++);
1571
1572         if (size == 2)
1573                 newcsum += le16_to_cpu(*(__le16*) isuper);
1574
1575         csum = (newcsum & 0xffffffff) + (newcsum >> 32);
1576         sb->sb_csum = disk_csum;
1577         return cpu_to_le32(csum);
1578 }
1579
1580 static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_version)
1581 {
1582         struct mdp_superblock_1 *sb;
1583         int ret;
1584         sector_t sb_start;
1585         sector_t sectors;
1586         int bmask;
1587         bool spare_disk = true;
1588
1589         /*
1590          * Calculate the position of the superblock in 512byte sectors.
1591          * It is always aligned to a 4K boundary and
1592          * depending on minor_version, it can be:
1593          * 0: At least 8K, but less than 12K, from end of device
1594          * 1: At start of device
1595          * 2: 4K from start of device.
1596          */
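        /* For example (editorial): on a 1000000-sector device, minor_version
         * 0 places the superblock at (1000000 - 16) & ~7 == 999984, i.e. 8K
         * from the end of the device, rounded down to a 4K boundary.
         */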
1597         switch(minor_version) {
1598         case 0:
1599                 sb_start = bdev_nr_sectors(rdev->bdev) - 8 * 2;
1600                 sb_start &= ~(sector_t)(4*2-1);
1601                 break;
1602         case 1:
1603                 sb_start = 0;
1604                 break;
1605         case 2:
1606                 sb_start = 8;
1607                 break;
1608         default:
1609                 return -EINVAL;
1610         }
1611         rdev->sb_start = sb_start;
1612
1613         /* superblock is rarely larger than 1K, but it can be larger,
1614          * and it is safe to read 4k, so we do that
1615          */
1616         ret = read_disk_sb(rdev, 4096);
1617         if (ret) return ret;
1618
1619         sb = page_address(rdev->sb_page);
1620
1621         if (sb->magic != cpu_to_le32(MD_SB_MAGIC) ||
1622             sb->major_version != cpu_to_le32(1) ||
1623             le32_to_cpu(sb->max_dev) > (4096-256)/2 ||
1624             le64_to_cpu(sb->super_offset) != rdev->sb_start ||
1625             (le32_to_cpu(sb->feature_map) & ~MD_FEATURE_ALL) != 0)
1626                 return -EINVAL;
1627
1628         if (calc_sb_1_csum(sb) != sb->sb_csum) {
1629                 pr_warn("md: invalid superblock checksum on %pg\n",
1630                         rdev->bdev);
1631                 return -EINVAL;
1632         }
1633         if (le64_to_cpu(sb->data_size) < 10) {
1634                 pr_warn("md: data_size too small on %pg\n",
1635                         rdev->bdev);
1636                 return -EINVAL;
1637         }
1638         if (sb->pad0 ||
1639             sb->pad3[0] ||
1640             memcmp(sb->pad3, sb->pad3+1, sizeof(sb->pad3) - sizeof(sb->pad3[1])))
1641                 /* Some padding is non-zero, might be a new feature */
1642                 return -EINVAL;
1643
1644         rdev->preferred_minor = 0xffff;
1645         rdev->data_offset = le64_to_cpu(sb->data_offset);
1646         rdev->new_data_offset = rdev->data_offset;
1647         if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE) &&
1648             (le32_to_cpu(sb->feature_map) & MD_FEATURE_NEW_OFFSET))
1649                 rdev->new_data_offset += (s32)le32_to_cpu(sb->new_offset);
1650         atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
1651
1652         rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
1653         bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
1654         if (rdev->sb_size & bmask)
1655                 rdev->sb_size = (rdev->sb_size | bmask) + 1;
1656
1657         if (minor_version
1658             && rdev->data_offset < sb_start + (rdev->sb_size/512))
1659                 return -EINVAL;
1660         if (minor_version
1661             && rdev->new_data_offset < sb_start + (rdev->sb_size/512))
1662                 return -EINVAL;
1663
1664         if (sb->level == cpu_to_le32(LEVEL_MULTIPATH))
1665                 rdev->desc_nr = -1;
1666         else
1667                 rdev->desc_nr = le32_to_cpu(sb->dev_number);
1668
1669         if (!rdev->bb_page) {
1670                 rdev->bb_page = alloc_page(GFP_KERNEL);
1671                 if (!rdev->bb_page)
1672                         return -ENOMEM;
1673         }
1674         if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BAD_BLOCKS) &&
1675             rdev->badblocks.count == 0) {
1676                 /* need to load the bad block list.
1677                  * Currently we limit it to one page.
1678                  */
1679                 s32 offset;
1680                 sector_t bb_sector;
1681                 __le64 *bbp;
1682                 int i;
1683                 int sectors = le16_to_cpu(sb->bblog_size);
1684                 if (sectors > (PAGE_SIZE / 512))
1685                         return -EINVAL;
1686                 offset = le32_to_cpu(sb->bblog_offset);
1687                 if (offset == 0)
1688                         return -EINVAL;
1689                 bb_sector = (long long)offset;
1690                 if (!sync_page_io(rdev, bb_sector, sectors << 9,
1691                                   rdev->bb_page, REQ_OP_READ, true))
1692                         return -EIO;
1693                 bbp = (__le64 *)page_address(rdev->bb_page);
1694                 rdev->badblocks.shift = sb->bblog_shift;
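                /* On-disk layout decoded below: each __le64 entry carries the
                 * start in its top 54 bits and the length in its low 10 bits,
                 * both in units of (1 << bblog_shift) sectors; an all-ones
                 * entry terminates the list.
                 */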
1695                 for (i = 0 ; i < (sectors << (9-3)) ; i++, bbp++) {
1696                         u64 bb = le64_to_cpu(*bbp);
1697                         int count = bb & (0x3ff);
1698                         u64 sector = bb >> 10;
1699                         sector <<= sb->bblog_shift;
1700                         count <<= sb->bblog_shift;
1701                         if (bb + 1 == 0)
1702                                 break;
1703                         if (badblocks_set(&rdev->badblocks, sector, count, 1))
1704                                 return -EINVAL;
1705                 }
1706         } else if (sb->bblog_offset != 0)
1707                 rdev->badblocks.shift = 0;
1708
1709         if ((le32_to_cpu(sb->feature_map) &
1710             (MD_FEATURE_PPL | MD_FEATURE_MULTIPLE_PPLS))) {
1711                 rdev->ppl.offset = (__s16)le16_to_cpu(sb->ppl.offset);
1712                 rdev->ppl.size = le16_to_cpu(sb->ppl.size);
1713                 rdev->ppl.sector = rdev->sb_start + rdev->ppl.offset;
1714         }
1715
1716         if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RAID0_LAYOUT) &&
1717             sb->level != 0)
1718                 return -EINVAL;
1719
1720         /* not spare disk, or LEVEL_MULTIPATH */
1721         if (sb->level == cpu_to_le32(LEVEL_MULTIPATH) ||
1722                 (rdev->desc_nr >= 0 &&
1723                 rdev->desc_nr < le32_to_cpu(sb->max_dev) &&
1724                 (le16_to_cpu(sb->dev_roles[rdev->desc_nr]) < MD_DISK_ROLE_MAX ||
1725                  le16_to_cpu(sb->dev_roles[rdev->desc_nr]) == MD_DISK_ROLE_JOURNAL)))
1726                 spare_disk = false;
1727
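        /* Return convention (editorial note, as analyze_sbs() interprets it):
         * 1 means this superblock is newer than *refdev* and should become
         * the new reference, 0 means keep the current reference, negative is
         * an error.  A spare never returns 1.
         */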
1728         if (!refdev) {
1729                 if (!spare_disk)
1730                         ret = 1;
1731                 else
1732                         ret = 0;
1733         } else {
1734                 __u64 ev1, ev2;
1735                 struct mdp_superblock_1 *refsb = page_address(refdev->sb_page);
1736
1737                 if (memcmp(sb->set_uuid, refsb->set_uuid, 16) != 0 ||
1738                     sb->level != refsb->level ||
1739                     sb->layout != refsb->layout ||
1740                     sb->chunksize != refsb->chunksize) {
1741                         pr_warn("md: %pg has strangely different superblock to %pg\n",
1742                                 rdev->bdev,
1743                                 refdev->bdev);
1744                         return -EINVAL;
1745                 }
1746                 ev1 = le64_to_cpu(sb->events);
1747                 ev2 = le64_to_cpu(refsb->events);
1748
1749                 if (!spare_disk && ev1 > ev2)
1750                         ret = 1;
1751                 else
1752                         ret = 0;
1753         }
1754         if (minor_version)
1755                 sectors = bdev_nr_sectors(rdev->bdev) - rdev->data_offset;
1756         else
1757                 sectors = rdev->sb_start;
1758         if (sectors < le64_to_cpu(sb->data_size))
1759                 return -EINVAL;
1760         rdev->sectors = le64_to_cpu(sb->data_size);
1761         return ret;
1762 }
1763
1764 static int super_1_validate(struct mddev *mddev, struct md_rdev *rdev)
1765 {
1766         struct mdp_superblock_1 *sb = page_address(rdev->sb_page);
1767         __u64 ev1 = le64_to_cpu(sb->events);
1768
1769         rdev->raid_disk = -1;
1770         clear_bit(Faulty, &rdev->flags);
1771         clear_bit(In_sync, &rdev->flags);
1772         clear_bit(Bitmap_sync, &rdev->flags);
1773         clear_bit(WriteMostly, &rdev->flags);
1774
1775         if (mddev->raid_disks == 0) {
1776                 mddev->major_version = 1;
1777                 mddev->patch_version = 0;
1778                 mddev->external = 0;
1779                 mddev->chunk_sectors = le32_to_cpu(sb->chunksize);
1780                 mddev->ctime = le64_to_cpu(sb->ctime);
1781                 mddev->utime = le64_to_cpu(sb->utime);
1782                 mddev->level = le32_to_cpu(sb->level);
1783                 mddev->clevel[0] = 0;
1784                 mddev->layout = le32_to_cpu(sb->layout);
1785                 mddev->raid_disks = le32_to_cpu(sb->raid_disks);
1786                 mddev->dev_sectors = le64_to_cpu(sb->size);
1787                 mddev->events = ev1;
1788                 mddev->bitmap_info.offset = 0;
1789                 mddev->bitmap_info.space = 0;
1790                 /* Default location for bitmap is 1K after superblock
1791                  * using 3K - total of 4K
1792                  */
1793                 mddev->bitmap_info.default_offset = 1024 >> 9;
1794                 mddev->bitmap_info.default_space = (4096-1024) >> 9;
1795                 mddev->reshape_backwards = 0;
1796
1797                 mddev->recovery_cp = le64_to_cpu(sb->resync_offset);
1798                 memcpy(mddev->uuid, sb->set_uuid, 16);
1799
1800                 mddev->max_disks =  (4096-256)/2;
1801
1802                 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BITMAP_OFFSET) &&
1803                     mddev->bitmap_info.file == NULL) {
1804                         mddev->bitmap_info.offset =
1805                                 (__s32)le32_to_cpu(sb->bitmap_offset);
1806                         /* Metadata doesn't record how much space is available.
1807                          * For 1.0, we assume we can use up to the superblock
1808                          * if the bitmap is before it, else to 4K beyond the superblock.
1809                          * For others, assume no change is possible.
1810                          */
1811                         if (mddev->minor_version > 0)
1812                                 mddev->bitmap_info.space = 0;
1813                         else if (mddev->bitmap_info.offset > 0)
1814                                 mddev->bitmap_info.space =
1815                                         8 - mddev->bitmap_info.offset;
1816                         else
1817                                 mddev->bitmap_info.space =
1818                                         -mddev->bitmap_info.offset;
1819                 }
1820
1821                 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE)) {
1822                         mddev->reshape_position = le64_to_cpu(sb->reshape_position);
1823                         mddev->delta_disks = le32_to_cpu(sb->delta_disks);
1824                         mddev->new_level = le32_to_cpu(sb->new_level);
1825                         mddev->new_layout = le32_to_cpu(sb->new_layout);
1826                         mddev->new_chunk_sectors = le32_to_cpu(sb->new_chunk);
1827                         if (mddev->delta_disks < 0 ||
1828                             (mddev->delta_disks == 0 &&
1829                              (le32_to_cpu(sb->feature_map)
1830                               & MD_FEATURE_RESHAPE_BACKWARDS)))
1831                                 mddev->reshape_backwards = 1;
1832                 } else {
1833                         mddev->reshape_position = MaxSector;
1834                         mddev->delta_disks = 0;
1835                         mddev->new_level = mddev->level;
1836                         mddev->new_layout = mddev->layout;
1837                         mddev->new_chunk_sectors = mddev->chunk_sectors;
1838                 }
1839
1840                 if (mddev->level == 0 &&
1841                     !(le32_to_cpu(sb->feature_map) & MD_FEATURE_RAID0_LAYOUT))
1842                         mddev->layout = -1;
1843
1844                 if (le32_to_cpu(sb->feature_map) & MD_FEATURE_JOURNAL)
1845                         set_bit(MD_HAS_JOURNAL, &mddev->flags);
1846
1847                 if (le32_to_cpu(sb->feature_map) &
1848                     (MD_FEATURE_PPL | MD_FEATURE_MULTIPLE_PPLS)) {
1849                         if (le32_to_cpu(sb->feature_map) &
1850                             (MD_FEATURE_BITMAP_OFFSET | MD_FEATURE_JOURNAL))
1851                                 return -EINVAL;
1852                         if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_PPL) &&
1853                             (le32_to_cpu(sb->feature_map) &
1854                                             MD_FEATURE_MULTIPLE_PPLS))
1855                                 return -EINVAL;
1856                         set_bit(MD_HAS_PPL, &mddev->flags);
1857                 }
1858         } else if (mddev->pers == NULL) {
1859                 /* Insist on a good event counter while assembling, except for
1860                  * spares (which don't need an event count) */
1861                 ++ev1;
1862                 if (rdev->desc_nr >= 0 &&
1863                     rdev->desc_nr < le32_to_cpu(sb->max_dev) &&
1864                     (le16_to_cpu(sb->dev_roles[rdev->desc_nr]) < MD_DISK_ROLE_MAX ||
1865                      le16_to_cpu(sb->dev_roles[rdev->desc_nr]) == MD_DISK_ROLE_JOURNAL))
1866                         if (ev1 < mddev->events)
1867                                 return -EINVAL;
1868         } else if (mddev->bitmap) {
1869                 /* If adding to array with a bitmap, then we can accept an
1870                  * older device, but not too old.
1871                  */
1872                 if (ev1 < mddev->bitmap->events_cleared)
1873                         return 0;
1874                 if (ev1 < mddev->events)
1875                         set_bit(Bitmap_sync, &rdev->flags);
1876         } else {
1877                 if (ev1 < mddev->events)
1878                         /* just a hot-add of a new device, leave raid_disk at -1 */
1879                         return 0;
1880         }
1881         if (mddev->level != LEVEL_MULTIPATH) {
1882                 int role;
1883                 if (rdev->desc_nr < 0 ||
1884                     rdev->desc_nr >= le32_to_cpu(sb->max_dev)) {
1885                         role = MD_DISK_ROLE_SPARE;
1886                         rdev->desc_nr = -1;
1887                 } else
1888                         role = le16_to_cpu(sb->dev_roles[rdev->desc_nr]);
1889                 switch(role) {
1890                 case MD_DISK_ROLE_SPARE: /* spare */
1891                         break;
1892                 case MD_DISK_ROLE_FAULTY: /* faulty */
1893                         set_bit(Faulty, &rdev->flags);
1894                         break;
1895                 case MD_DISK_ROLE_JOURNAL: /* journal device */
1896                         if (!(le32_to_cpu(sb->feature_map) & MD_FEATURE_JOURNAL)) {
1897                                 /* journal device without journal feature */
1898                                 pr_warn("md: journal device provided without journal feature, ignoring the device\n");
1899                                 return -EINVAL;
1900                         }
1901                         set_bit(Journal, &rdev->flags);
1902                         rdev->journal_tail = le64_to_cpu(sb->journal_tail);
1903                         rdev->raid_disk = 0;
1904                         break;
1905                 default:
1906                         rdev->saved_raid_disk = role;
1907                         if ((le32_to_cpu(sb->feature_map) &
1908                              MD_FEATURE_RECOVERY_OFFSET)) {
1909                                 rdev->recovery_offset = le64_to_cpu(sb->recovery_offset);
1910                                 if (!(le32_to_cpu(sb->feature_map) &
1911                                       MD_FEATURE_RECOVERY_BITMAP))
1912                                         rdev->saved_raid_disk = -1;
1913                         } else {
1914                                 /*
1915                                  * If the array is FROZEN, then the device can't
1916                                  * be in_sync with rest of array.
1917                                  */
1918                                 if (!test_bit(MD_RECOVERY_FROZEN,
1919                                               &mddev->recovery))
1920                                         set_bit(In_sync, &rdev->flags);
1921                         }
1922                         rdev->raid_disk = role;
1923                         break;
1924                 }
1925                 if (sb->devflags & WriteMostly1)
1926                         set_bit(WriteMostly, &rdev->flags);
1927                 if (sb->devflags & FailFast1)
1928                         set_bit(FailFast, &rdev->flags);
1929                 if (le32_to_cpu(sb->feature_map) & MD_FEATURE_REPLACEMENT)
1930                         set_bit(Replacement, &rdev->flags);
1931         } else /* MULTIPATH are always insync */
1932                 set_bit(In_sync, &rdev->flags);
1933
1934         return 0;
1935 }
1936
1937 static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev)
1938 {
1939         struct mdp_superblock_1 *sb;
1940         struct md_rdev *rdev2;
1941         int max_dev, i;
1942         /* make rdev->sb match mddev and rdev data. */
1943
1944         sb = page_address(rdev->sb_page);
1945
1946         sb->feature_map = 0;
1947         sb->pad0 = 0;
1948         sb->recovery_offset = cpu_to_le64(0);
1949         memset(sb->pad3, 0, sizeof(sb->pad3));
1950
1951         sb->utime = cpu_to_le64((__u64)mddev->utime);
1952         sb->events = cpu_to_le64(mddev->events);
1953         if (mddev->in_sync)
1954                 sb->resync_offset = cpu_to_le64(mddev->recovery_cp);
1955         else if (test_bit(MD_JOURNAL_CLEAN, &mddev->flags))
1956                 sb->resync_offset = cpu_to_le64(MaxSector);
1957         else
1958                 sb->resync_offset = cpu_to_le64(0);
1959
1960         sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
1961
1962         sb->raid_disks = cpu_to_le32(mddev->raid_disks);
1963         sb->size = cpu_to_le64(mddev->dev_sectors);
1964         sb->chunksize = cpu_to_le32(mddev->chunk_sectors);
1965         sb->level = cpu_to_le32(mddev->level);
1966         sb->layout = cpu_to_le32(mddev->layout);
1967         if (test_bit(FailFast, &rdev->flags))
1968                 sb->devflags |= FailFast1;
1969         else
1970                 sb->devflags &= ~FailFast1;
1971
1972         if (test_bit(WriteMostly, &rdev->flags))
1973                 sb->devflags |= WriteMostly1;
1974         else
1975                 sb->devflags &= ~WriteMostly1;
1976         sb->data_offset = cpu_to_le64(rdev->data_offset);
1977         sb->data_size = cpu_to_le64(rdev->sectors);
1978
1979         if (mddev->bitmap && mddev->bitmap_info.file == NULL) {
1980                 sb->bitmap_offset = cpu_to_le32((__u32)mddev->bitmap_info.offset);
1981                 sb->feature_map = cpu_to_le32(MD_FEATURE_BITMAP_OFFSET);
1982         }
1983
1984         if (rdev->raid_disk >= 0 && !test_bit(Journal, &rdev->flags) &&
1985             !test_bit(In_sync, &rdev->flags)) {
1986                 sb->feature_map |=
1987                         cpu_to_le32(MD_FEATURE_RECOVERY_OFFSET);
1988                 sb->recovery_offset =
1989                         cpu_to_le64(rdev->recovery_offset);
1990                 if (rdev->saved_raid_disk >= 0 && mddev->bitmap)
1991                         sb->feature_map |=
1992                                 cpu_to_le32(MD_FEATURE_RECOVERY_BITMAP);
1993         }
1994         /* Note: recovery_offset and journal_tail share space  */
1995         if (test_bit(Journal, &rdev->flags))
1996                 sb->journal_tail = cpu_to_le64(rdev->journal_tail);
1997         if (test_bit(Replacement, &rdev->flags))
1998                 sb->feature_map |=
1999                         cpu_to_le32(MD_FEATURE_REPLACEMENT);
2000
2001         if (mddev->reshape_position != MaxSector) {
2002                 sb->feature_map |= cpu_to_le32(MD_FEATURE_RESHAPE_ACTIVE);
2003                 sb->reshape_position = cpu_to_le64(mddev->reshape_position);
2004                 sb->new_layout = cpu_to_le32(mddev->new_layout);
2005                 sb->delta_disks = cpu_to_le32(mddev->delta_disks);
2006                 sb->new_level = cpu_to_le32(mddev->new_level);
2007                 sb->new_chunk = cpu_to_le32(mddev->new_chunk_sectors);
2008                 if (mddev->delta_disks == 0 &&
2009                     mddev->reshape_backwards)
2010                         sb->feature_map
2011                                 |= cpu_to_le32(MD_FEATURE_RESHAPE_BACKWARDS);
2012                 if (rdev->new_data_offset != rdev->data_offset) {
2013                         sb->feature_map
2014                                 |= cpu_to_le32(MD_FEATURE_NEW_OFFSET);
2015                         sb->new_offset = cpu_to_le32((__u32)(rdev->new_data_offset
2016                                                              - rdev->data_offset));
2017                 }
2018         }
2019
2020         if (mddev_is_clustered(mddev))
2021                 sb->feature_map |= cpu_to_le32(MD_FEATURE_CLUSTERED);
2022
2023         if (rdev->badblocks.count == 0)
2024                 /* Nothing to do for bad blocks*/ ;
2025         else if (sb->bblog_offset == 0)
2026                 /* Cannot record bad blocks on this device */
2027                 md_error(mddev, rdev);
2028         else {
2029                 struct badblocks *bb = &rdev->badblocks;
2030                 __le64 *bbp = (__le64 *)page_address(rdev->bb_page);
2031                 u64 *p = bb->page;
2032                 sb->feature_map |= cpu_to_le32(MD_FEATURE_BAD_BLOCKS);
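                /* The write-out below mirrors the load format: each 64-bit
                 * entry is (start << 10) | length, and unused slots are left
                 * as all-ones (the 0xff fill) so the loader stops at the
                 * first such entry.
                 */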
2033                 if (bb->changed) {
2034                         unsigned seq;
2035
2036 retry:
2037                         seq = read_seqbegin(&bb->lock);
2038
2039                         memset(bbp, 0xff, PAGE_SIZE);
2040
2041                         for (i = 0 ; i < bb->count ; i++) {
2042                                 u64 internal_bb = p[i];
2043                                 u64 store_bb = ((BB_OFFSET(internal_bb) << 10)
2044                                                 | BB_LEN(internal_bb));
2045                                 bbp[i] = cpu_to_le64(store_bb);
2046                         }
2047                         bb->changed = 0;
2048                         if (read_seqretry(&bb->lock, seq))
2049                                 goto retry;
2050
2051                         bb->sector = (rdev->sb_start +
2052                                       (int)le32_to_cpu(sb->bblog_offset));
2053                         bb->size = le16_to_cpu(sb->bblog_size);
2054                 }
2055         }
2056
2057         max_dev = 0;
2058         rdev_for_each(rdev2, mddev)
2059                 if (rdev2->desc_nr+1 > max_dev)
2060                         max_dev = rdev2->desc_nr+1;
2061
2062         if (max_dev > le32_to_cpu(sb->max_dev)) {
2063                 int bmask;
2064                 sb->max_dev = cpu_to_le32(max_dev);
2065                 rdev->sb_size = max_dev * 2 + 256;
2066                 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
2067                 if (rdev->sb_size & bmask)
2068                         rdev->sb_size = (rdev->sb_size | bmask) + 1;
2069         } else
2070                 max_dev = le32_to_cpu(sb->max_dev);
2071
2072         for (i=0; i<max_dev;i++)
2073                 sb->dev_roles[i] = cpu_to_le16(MD_DISK_ROLE_SPARE);
2074
2075         if (test_bit(MD_HAS_JOURNAL, &mddev->flags))
2076                 sb->feature_map |= cpu_to_le32(MD_FEATURE_JOURNAL);
2077
2078         if (test_bit(MD_HAS_PPL, &mddev->flags)) {
2079                 if (test_bit(MD_HAS_MULTIPLE_PPLS, &mddev->flags))
2080                         sb->feature_map |=
2081                             cpu_to_le32(MD_FEATURE_MULTIPLE_PPLS);
2082                 else
2083                         sb->feature_map |= cpu_to_le32(MD_FEATURE_PPL);
2084                 sb->ppl.offset = cpu_to_le16(rdev->ppl.offset);
2085                 sb->ppl.size = cpu_to_le16(rdev->ppl.size);
2086         }
2087
2088         rdev_for_each(rdev2, mddev) {
2089                 i = rdev2->desc_nr;
2090                 if (test_bit(Faulty, &rdev2->flags))
2091                         sb->dev_roles[i] = cpu_to_le16(MD_DISK_ROLE_FAULTY);
2092                 else if (test_bit(In_sync, &rdev2->flags))
2093                         sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk);
2094                 else if (test_bit(Journal, &rdev2->flags))
2095                         sb->dev_roles[i] = cpu_to_le16(MD_DISK_ROLE_JOURNAL);
2096                 else if (rdev2->raid_disk >= 0)
2097                         sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk);
2098                 else
2099                         sb->dev_roles[i] = cpu_to_le16(MD_DISK_ROLE_SPARE);
2100         }
2101
2102         sb->sb_csum = calc_sb_1_csum(sb);
2103 }
2104
2105 static sector_t super_1_choose_bm_space(sector_t dev_size)
2106 {
2107         sector_t bm_space;
2108
2109         /* if the device is bigger than 8Gig, save 64k for bitmap
2110          * usage; if bigger than 200Gig, save 128k
2111          */
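        /* All values are 512-byte sectors: 8*1024*1024*2 is 8GiB,
         * 200*1024*1024*2 is 200GiB, 64*2 and 128*2 are 64K and 128K.
         */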
2112         if (dev_size < 64*2)
2113                 bm_space = 0;
2114         else if (dev_size - 64*2 >= 200*1024*1024*2)
2115                 bm_space = 128*2;
2116         else if (dev_size - 4*2 > 8*1024*1024*2)
2117                 bm_space = 64*2;
2118         else
2119                 bm_space = 4*2;
2120         return bm_space;
2121 }
2122
2123 static unsigned long long
2124 super_1_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors)
2125 {
2126         struct mdp_superblock_1 *sb;
2127         sector_t max_sectors;
2128         if (num_sectors && num_sectors < rdev->mddev->dev_sectors)
2129                 return 0; /* component must fit device */
2130         if (rdev->data_offset != rdev->new_data_offset)
2131                 return 0; /* too confusing */
2132         if (rdev->sb_start < rdev->data_offset) {
2133                 /* minor versions 1 and 2; superblock before data */
2134                 max_sectors = bdev_nr_sectors(rdev->bdev) - rdev->data_offset;
2135                 if (!num_sectors || num_sectors > max_sectors)
2136                         num_sectors = max_sectors;
2137         } else if (rdev->mddev->bitmap_info.offset) {
2138                 /* minor version 0 with bitmap we can't move */
2139                 return 0;
2140         } else {
2141                 /* minor version 0; superblock after data */
2142                 sector_t sb_start, bm_space;
2143                 sector_t dev_size = bdev_nr_sectors(rdev->bdev);
2144
2145                 /* 8K is for superblock */
2146                 sb_start = dev_size - 8*2;
2147                 sb_start &= ~(sector_t)(4*2 - 1);
2148
2149                 bm_space = super_1_choose_bm_space(dev_size);
2150
2151                 /* Space that can be used to store data must exclude the
2152                  * superblock, bitmap space and bad block space (4K)
2153                  */
2154                 max_sectors = sb_start - bm_space - 4*2;
2155
2156                 if (!num_sectors || num_sectors > max_sectors)
2157                         num_sectors = max_sectors;
2158                 rdev->sb_start = sb_start;
2159         }
2160         sb = page_address(rdev->sb_page);
2161         sb->data_size = cpu_to_le64(num_sectors);
2162         sb->super_offset = cpu_to_le64(rdev->sb_start);
2163         sb->sb_csum = calc_sb_1_csum(sb);
2164         do {
2165                 md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
2166                                rdev->sb_page);
2167         } while (md_super_wait(rdev->mddev) < 0);
2168         return num_sectors;
2169
2170 }
2171
2172 static int
2173 super_1_allow_new_offset(struct md_rdev *rdev,
2174                          unsigned long long new_offset)
2175 {
2176         /* All necessary checks on new >= old have been done */
2177         struct bitmap *bitmap;
2178         if (new_offset >= rdev->data_offset)
2179                 return 1;
2180
2181         /* with 1.0 metadata, there is no metadata to tread on
2182          * so we can always move back */
2183         if (rdev->mddev->minor_version == 0)
2184                 return 1;
2185
2186         /* otherwise we must be sure not to step on
2187          * any metadata, so stay:
2188          * 36K beyond start of superblock
2189          * beyond end of badblocks
2190          * beyond write-intent bitmap
2191          */
2192         if (rdev->sb_start + (32+4)*2 > new_offset)
2193                 return 0;
2194         bitmap = rdev->mddev->bitmap;
2195         if (bitmap && !rdev->mddev->bitmap_info.file &&
2196             rdev->sb_start + rdev->mddev->bitmap_info.offset +
2197             bitmap->storage.file_pages * (PAGE_SIZE>>9) > new_offset)
2198                 return 0;
2199         if (rdev->badblocks.sector + rdev->badblocks.size > new_offset)
2200                 return 0;
2201
2202         return 1;
2203 }
2204
2205 static struct super_type super_types[] = {
2206         [0] = {
2207                 .name   = "0.90.0",
2208                 .owner  = THIS_MODULE,
2209                 .load_super         = super_90_load,
2210                 .validate_super     = super_90_validate,
2211                 .sync_super         = super_90_sync,
2212                 .rdev_size_change   = super_90_rdev_size_change,
2213                 .allow_new_offset   = super_90_allow_new_offset,
2214         },
2215         [1] = {
2216                 .name   = "md-1",
2217                 .owner  = THIS_MODULE,
2218                 .load_super         = super_1_load,
2219                 .validate_super     = super_1_validate,
2220                 .sync_super         = super_1_sync,
2221                 .rdev_size_change   = super_1_rdev_size_change,
2222                 .allow_new_offset   = super_1_allow_new_offset,
2223         },
2224 };
2225
2226 static void sync_super(struct mddev *mddev, struct md_rdev *rdev)
2227 {
2228         if (mddev->sync_super) {
2229                 mddev->sync_super(mddev, rdev);
2230                 return;
2231         }
2232
2233         BUG_ON(mddev->major_version >= ARRAY_SIZE(super_types));
2234
2235         super_types[mddev->major_version].sync_super(mddev, rdev);
2236 }
2237
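/*
 * Return 1 if the two arrays share a physical disk (the same gendisk backs an
 * active member of each), 0 otherwise; callers use this to warn about
 * overlapping arrays.
 */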
2238 static int match_mddev_units(struct mddev *mddev1, struct mddev *mddev2)
2239 {
2240         struct md_rdev *rdev, *rdev2;
2241
2242         rcu_read_lock();
2243         rdev_for_each_rcu(rdev, mddev1) {
2244                 if (test_bit(Faulty, &rdev->flags) ||
2245                     test_bit(Journal, &rdev->flags) ||
2246                     rdev->raid_disk == -1)
2247                         continue;
2248                 rdev_for_each_rcu(rdev2, mddev2) {
2249                         if (test_bit(Faulty, &rdev2->flags) ||
2250                             test_bit(Journal, &rdev2->flags) ||
2251                             rdev2->raid_disk == -1)
2252                                 continue;
2253                         if (rdev->bdev->bd_disk == rdev2->bdev->bd_disk) {
2254                                 rcu_read_unlock();
2255                                 return 1;
2256                         }
2257                 }
2258         }
2259         rcu_read_unlock();
2260         return 0;
2261 }
2262
2263 static LIST_HEAD(pending_raid_disks);
2264
2265 /*
2266  * Try to register data integrity profile for an mddev
2267  *
2268  * This is called when an array is started and after a disk has been kicked
2269  * from the array. It only succeeds if all working and active component devices
2270  * are integrity capable with matching profiles.
2271  */
2272 int md_integrity_register(struct mddev *mddev)
2273 {
2274         struct md_rdev *rdev, *reference = NULL;
2275
2276         if (list_empty(&mddev->disks))
2277                 return 0; /* nothing to do */
2278         if (!mddev->gendisk || blk_get_integrity(mddev->gendisk))
2279                 return 0; /* shouldn't register, or already is */
2280         rdev_for_each(rdev, mddev) {
2281                 /* skip spares and non-functional disks */
2282                 if (test_bit(Faulty, &rdev->flags))
2283                         continue;
2284                 if (rdev->raid_disk < 0)
2285                         continue;
2286                 if (!reference) {
2287                         /* Use the first rdev as the reference */
2288                         reference = rdev;
2289                         continue;
2290                 }
2291                 /* does this rdev's profile match the reference profile? */
2292                 if (blk_integrity_compare(reference->bdev->bd_disk,
2293                                 rdev->bdev->bd_disk) < 0)
2294                         return -EINVAL;
2295         }
2296         if (!reference || !bdev_get_integrity(reference->bdev))
2297                 return 0;
2298         /*
2299          * All component devices are integrity capable and have matching
2300          * profiles; register the common profile for the md device.
2301          */
2302         blk_integrity_register(mddev->gendisk,
2303                                bdev_get_integrity(reference->bdev));
2304
2305         pr_debug("md: data integrity enabled on %s\n", mdname(mddev));
2306         if (bioset_integrity_create(&mddev->bio_set, BIO_POOL_SIZE) ||
2307             (mddev->level != 1 && mddev->level != 10 &&
2308              bioset_integrity_create(&mddev->io_acct_set, BIO_POOL_SIZE))) {
2309                 /*
2310                  * No need to handle the failure of bioset_integrity_create,
2311                  * because this function is called by md_run() -> pers->run(),
2312                  * and md_run() calls bioset_exit() -> bioset_integrity_free()
2313                  * in the failure case.
2314                  */
2315                 pr_err("md: failed to create integrity pool for %s\n",
2316                        mdname(mddev));
2317                 return -EINVAL;
2318         }
2319         return 0;
2320 }
2321 EXPORT_SYMBOL(md_integrity_register);
2322
2323 /*
2324  * Attempt to add an rdev, but only if it is consistent with the current
2325  * integrity profile
2326  */
2327 int md_integrity_add_rdev(struct md_rdev *rdev, struct mddev *mddev)
2328 {
2329         struct blk_integrity *bi_mddev;
2330
2331         if (!mddev->gendisk)
2332                 return 0;
2333
2334         bi_mddev = blk_get_integrity(mddev->gendisk);
2335
2336         if (!bi_mddev) /* nothing to do */
2337                 return 0;
2338
2339         if (blk_integrity_compare(mddev->gendisk, rdev->bdev->bd_disk) != 0) {
2340                 pr_err("%s: incompatible integrity profile for %pg\n",
2341                        mdname(mddev), rdev->bdev);
2342                 return -ENXIO;
2343         }
2344
2345         return 0;
2346 }
2347 EXPORT_SYMBOL(md_integrity_add_rdev);
2348
2349 static bool rdev_read_only(struct md_rdev *rdev)
2350 {
2351         return bdev_read_only(rdev->bdev) ||
2352                 (rdev->meta_bdev && bdev_read_only(rdev->meta_bdev));
2353 }
2354
2355 static int bind_rdev_to_array(struct md_rdev *rdev, struct mddev *mddev)
2356 {
2357         char b[BDEVNAME_SIZE];
2358         int err;
2359
2360         /* prevent duplicates */
2361         if (find_rdev(mddev, rdev->bdev->bd_dev))
2362                 return -EEXIST;
2363
2364         if (rdev_read_only(rdev) && mddev->pers)
2365                 return -EROFS;
2366
2367         /* make sure rdev->sectors exceeds mddev->dev_sectors */
2368         if (!test_bit(Journal, &rdev->flags) &&
2369             rdev->sectors &&
2370             (mddev->dev_sectors == 0 || rdev->sectors < mddev->dev_sectors)) {
2371                 if (mddev->pers) {
2372                         /* Cannot change size, so fail
2373                          * If mddev->level <= 0, then we don't care
2374                          * about aligning sizes (e.g. linear)
2375                          */
2376                         if (mddev->level > 0)
2377                                 return -ENOSPC;
2378                 } else
2379                         mddev->dev_sectors = rdev->sectors;
2380         }
2381
2382         /* Verify rdev->desc_nr is unique.
2383          * If it is -1, assign a free number, else
2384          * check number is not in use
2385          */
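        /* e.g. on a running array with raid_disks == 4, a new spare with
         * desc_nr == -1 starts probing at 4 and takes the first free number.
         */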
2386         rcu_read_lock();
2387         if (rdev->desc_nr < 0) {
2388                 int choice = 0;
2389                 if (mddev->pers)
2390                         choice = mddev->raid_disks;
2391                 while (md_find_rdev_nr_rcu(mddev, choice))
2392                         choice++;
2393                 rdev->desc_nr = choice;
2394         } else {
2395                 if (md_find_rdev_nr_rcu(mddev, rdev->desc_nr)) {
2396                         rcu_read_unlock();
2397                         return -EBUSY;
2398                 }
2399         }
2400         rcu_read_unlock();
2401         if (!test_bit(Journal, &rdev->flags) &&
2402             mddev->max_disks && rdev->desc_nr >= mddev->max_disks) {
2403                 pr_warn("md: %s: array is limited to %d devices\n",
2404                         mdname(mddev), mddev->max_disks);
2405                 return -EBUSY;
2406         }
2407         snprintf(b, sizeof(b), "%pg", rdev->bdev);
2408         strreplace(b, '/', '!');
2409
2410         rdev->mddev = mddev;
2411         pr_debug("md: bind<%s>\n", b);
2412
2413         if (mddev->raid_disks)
2414                 mddev_create_serial_pool(mddev, rdev, false);
2415
2416         if ((err = kobject_add(&rdev->kobj, &mddev->kobj, "dev-%s", b)))
2417                 goto fail;
2418
2419         /* failure here is OK */
2420         err = sysfs_create_link(&rdev->kobj, bdev_kobj(rdev->bdev), "block");
2421         rdev->sysfs_state = sysfs_get_dirent_safe(rdev->kobj.sd, "state");
2422         rdev->sysfs_unack_badblocks =
2423                 sysfs_get_dirent_safe(rdev->kobj.sd, "unacknowledged_bad_blocks");
2424         rdev->sysfs_badblocks =
2425                 sysfs_get_dirent_safe(rdev->kobj.sd, "bad_blocks");
2426
2427         list_add_rcu(&rdev->same_set, &mddev->disks);
2428         bd_link_disk_holder(rdev->bdev, mddev->gendisk);
2429
2430         /* May as well allow recovery to be retried once */
2431         mddev->recovery_disabled++;
2432
2433         return 0;
2434
2435  fail:
2436         pr_warn("md: failed to register dev-%s for %s\n",
2437                 b, mdname(mddev));
2438         return err;
2439 }
2440
2441 void md_autodetect_dev(dev_t dev);
2442
2443 /* just for claiming the bdev */
2444 static struct md_rdev claim_rdev;
2445
2446 static void export_rdev(struct md_rdev *rdev, struct mddev *mddev)
2447 {
2448         pr_debug("md: export_rdev(%pg)\n", rdev->bdev);
2449         md_rdev_clear(rdev);
2450 #ifndef MODULE
2451         if (test_bit(AutoDetected, &rdev->flags))
2452                 md_autodetect_dev(rdev->bdev->bd_dev);
2453 #endif
2454         blkdev_put(rdev->bdev, mddev->external ? &claim_rdev : rdev);
2455         rdev->bdev = NULL;
2456         kobject_put(&rdev->kobj);
2457 }
2458
2459 static void md_kick_rdev_from_array(struct md_rdev *rdev)
2460 {
2461         struct mddev *mddev = rdev->mddev;
2462
2463         bd_unlink_disk_holder(rdev->bdev, rdev->mddev->gendisk);
2464         list_del_rcu(&rdev->same_set);
2465         pr_debug("md: unbind<%pg>\n", rdev->bdev);
2466         mddev_destroy_serial_pool(rdev->mddev, rdev, false);
2467         rdev->mddev = NULL;
2468         sysfs_remove_link(&rdev->kobj, "block");
2469         sysfs_put(rdev->sysfs_state);
2470         sysfs_put(rdev->sysfs_unack_badblocks);
2471         sysfs_put(rdev->sysfs_badblocks);
2472         rdev->sysfs_state = NULL;
2473         rdev->sysfs_unack_badblocks = NULL;
2474         rdev->sysfs_badblocks = NULL;
2475         rdev->badblocks.count = 0;
2476
2477         synchronize_rcu();
2478
2479         /*
2480          * kobject_del() will wait for all in-progress writers to be done; those
2481          * writers hold reconfig_mutex, hence kobject_del() can't be called under
2482          * reconfig_mutex and is delayed until mddev_unlock().
2483          */
2484         list_add(&rdev->same_set, &mddev->deleting);
2485 }
2486
2487 static void export_array(struct mddev *mddev)
2488 {
2489         struct md_rdev *rdev;
2490
2491         while (!list_empty(&mddev->disks)) {
2492                 rdev = list_first_entry(&mddev->disks, struct md_rdev,
2493                                         same_set);
2494                 md_kick_rdev_from_array(rdev);
2495         }
2496         mddev->raid_disks = 0;
2497         mddev->major_version = 0;
2498 }
2499
2500 static bool set_in_sync(struct mddev *mddev)
2501 {
2502         lockdep_assert_held(&mddev->lock);
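        /* writes_pending is a percpu refcount; switching it to atomic mode
         * makes percpu_ref_is_zero() below reliable, and sync_checkers keeps
         * it in atomic mode while any concurrent caller is still checking.
         */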
2503         if (!mddev->in_sync) {
2504                 mddev->sync_checkers++;
2505                 spin_unlock(&mddev->lock);
2506                 percpu_ref_switch_to_atomic_sync(&mddev->writes_pending);
2507                 spin_lock(&mddev->lock);
2508                 if (!mddev->in_sync &&
2509                     percpu_ref_is_zero(&mddev->writes_pending)) {
2510                         mddev->in_sync = 1;
2511                         /*
2512                          * Ensure ->in_sync is visible before we clear
2513                          * ->sync_checkers.
2514                          */
2515                         smp_mb();
2516                         set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
2517                         sysfs_notify_dirent_safe(mddev->sysfs_state);
2518                 }
2519                 if (--mddev->sync_checkers == 0)
2520                         percpu_ref_switch_to_percpu(&mddev->writes_pending);
2521         }
2522         if (mddev->safemode == 1)
2523                 mddev->safemode = 0;
2524         return mddev->in_sync;
2525 }
2526
2527 static void sync_sbs(struct mddev *mddev, int nospares)
2528 {
2529         /* Update each superblock (in-memory image), but
2530          * if we are allowed to, skip spares which already
2531          * have the right event counter, or have one earlier
2532          * (which would mean they aren't being marked as dirty
2533          * with the rest of the array)
2534          */
2535         struct md_rdev *rdev;
2536         rdev_for_each(rdev, mddev) {
2537                 if (rdev->sb_events == mddev->events ||
2538                     (nospares &&
2539                      rdev->raid_disk < 0 &&
2540                      rdev->sb_events+1 == mddev->events)) {
2541                         /* Don't update this superblock */
2542                         rdev->sb_loaded = 2;
2543                 } else {
2544                         sync_super(mddev, rdev);
2545                         rdev->sb_loaded = 1;
2546                 }
2547         }
2548 }
2549
2550 static bool does_sb_need_changing(struct mddev *mddev)
2551 {
2552         struct md_rdev *rdev = NULL, *iter;
2553         struct mdp_superblock_1 *sb;
2554         int role;
2555
2556         /* Find a good rdev */
2557         rdev_for_each(iter, mddev)
2558                 if ((iter->raid_disk >= 0) && !test_bit(Faulty, &iter->flags)) {
2559                         rdev = iter;
2560                         break;
2561                 }
2562
2563         /* No good device found. */
2564         if (!rdev)
2565                 return false;
2566
2567         sb = page_address(rdev->sb_page);
2568         /* Check if a device has become faulty or a spare become active */
2569         rdev_for_each(rdev, mddev) {
2570                 role = le16_to_cpu(sb->dev_roles[rdev->desc_nr]);
2571                 /* Device activated? */
2572                 if (role == MD_DISK_ROLE_SPARE && rdev->raid_disk >= 0 &&
2573                     !test_bit(Faulty, &rdev->flags))
2574                         return true;
2575                 /* Device turned faulty? */
2576                 if (test_bit(Faulty, &rdev->flags) && (role < MD_DISK_ROLE_MAX))
2577                         return true;
2578         }
2579
2580         /* Check if any mddev parameters have changed */
2581         if ((mddev->dev_sectors != le64_to_cpu(sb->size)) ||
2582             (mddev->reshape_position != le64_to_cpu(sb->reshape_position)) ||
2583             (mddev->layout != le32_to_cpu(sb->layout)) ||
2584             (mddev->raid_disks != le32_to_cpu(sb->raid_disks)) ||
2585             (mddev->chunk_sectors != le32_to_cpu(sb->chunksize)))
2586                 return true;
2587
2588         return false;
2589 }
2590
2591 void md_update_sb(struct mddev *mddev, int force_change)
2592 {
2593         struct md_rdev *rdev;
2594         int sync_req;
2595         int nospares = 0;
2596         int any_badblocks_changed = 0;
2597         int ret = -1;
2598
2599         if (!md_is_rdwr(mddev)) {
2600                 if (force_change)
2601                         set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
2602                 return;
2603         }
2604
2605 repeat:
2606         if (mddev_is_clustered(mddev)) {
2607                 if (test_and_clear_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags))
2608                         force_change = 1;
2609                 if (test_and_clear_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags))
2610                         nospares = 1;
2611                 ret = md_cluster_ops->metadata_update_start(mddev);
2612                 /* Has someone else updated the sb? */
2613                 if (!does_sb_need_changing(mddev)) {
2614                         if (ret == 0)
2615                                 md_cluster_ops->metadata_update_cancel(mddev);
2616                         bit_clear_unless(&mddev->sb_flags, BIT(MD_SB_CHANGE_PENDING),
2617                                                          BIT(MD_SB_CHANGE_DEVS) |
2618                                                          BIT(MD_SB_CHANGE_CLEAN));
2619                         return;
2620                 }
2621         }
2622
2623         /*
2624          * First make sure individual recovery_offsets are correct
2625          * curr_resync_completed can only be used during recovery.
2626          * During reshape/resync it might use array-addresses rather
2627          * than device addresses.
2628          */
2629         rdev_for_each(rdev, mddev) {
2630                 if (rdev->raid_disk >= 0 &&
2631                     mddev->delta_disks >= 0 &&
2632                     test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) &&
2633                     test_bit(MD_RECOVERY_RECOVER, &mddev->recovery) &&
2634                     !test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
2635                     !test_bit(Journal, &rdev->flags) &&
2636                     !test_bit(In_sync, &rdev->flags) &&
2637                     mddev->curr_resync_completed > rdev->recovery_offset)
2638                                 rdev->recovery_offset = mddev->curr_resync_completed;
2639
2640         }
2641         if (!mddev->persistent) {
2642                 clear_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
2643                 clear_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
2644                 if (!mddev->external) {
2645                         clear_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
2646                         rdev_for_each(rdev, mddev) {
2647                                 if (rdev->badblocks.changed) {
2648                                         rdev->badblocks.changed = 0;
2649                                         ack_all_badblocks(&rdev->badblocks);
2650                                         md_error(mddev, rdev);
2651                                 }
2652                                 clear_bit(Blocked, &rdev->flags);
2653                                 clear_bit(BlockedBadBlocks, &rdev->flags);
2654                                 wake_up(&rdev->blocked_wait);
2655                         }
2656                 }
2657                 wake_up(&mddev->sb_wait);
2658                 return;
2659         }
2660
2661         spin_lock(&mddev->lock);
2662
2663         mddev->utime = ktime_get_real_seconds();
2664
2665         if (test_and_clear_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags))
2666                 force_change = 1;
2667         if (test_and_clear_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags))
2668                 /* just a clean <-> dirty transition; possibly leave spares alone,
2669                  * though if 'events' isn't the right even/odd, we will have to
2670                  * update the spares after all
2671                  */
2672                 nospares = 1;
2673         if (force_change)
2674                 nospares = 0;
2675         if (mddev->degraded)
2676                 /* If the array is degraded, then skipping spares is both
2677                  * dangerous and fairly pointless.
2678                  * Dangerous because a device that was removed from the array
2679          * might have an event_count that still looks up-to-date,
2680                  * so it can be re-added without a resync.
2681                  * Pointless because if there are any spares to skip,
2682                  * then a recovery will happen and soon that array won't
2683                  * be degraded any more and the spare can go back to sleep then.
2684                  */
2685                 nospares = 0;
2686
2687         sync_req = mddev->in_sync;
2688
2689         /* If this is just a dirty<->clean transition, and the array is clean
2690          * and 'events' is odd, we can roll back to the previous clean state */
2691         if (nospares
2692             && (mddev->in_sync && mddev->recovery_cp == MaxSector)
2693             && mddev->can_decrease_events
2694             && mddev->events != 1) {
2695                 mddev->events--;
2696                 mddev->can_decrease_events = 0;
2697         } else {
2698                 /* otherwise we have to go forward and ... */
2699                 mddev->events ++;
2700                 mddev->can_decrease_events = nospares;
2701         }
2702
2703         /*
2704          * This 64-bit counter should never wrap.
2705          * Either we are in around ~1 trillion A.C., assuming
2706          * 1 reboot per second, or we have a bug...
2707          */
2708         WARN_ON(mddev->events == 0);
2709
2710         rdev_for_each(rdev, mddev) {
2711                 if (rdev->badblocks.changed)
2712                         any_badblocks_changed++;
2713                 if (test_bit(Faulty, &rdev->flags))
2714                         set_bit(FaultRecorded, &rdev->flags);
2715         }
2716
2717         sync_sbs(mddev, nospares);
2718         spin_unlock(&mddev->lock);
2719
2720         pr_debug("md: updating %s RAID superblock on device (in sync %d)\n",
2721                  mdname(mddev), mddev->in_sync);
2722
2723         if (mddev->queue)
2724                 blk_add_trace_msg(mddev->queue, "md md_update_sb");
2725 rewrite:
2726         md_bitmap_update_sb(mddev->bitmap);
2727         rdev_for_each(rdev, mddev) {
2728                 if (rdev->sb_loaded != 1)
2729                         continue; /* no noise on spare devices */
2730
2731                 if (!test_bit(Faulty, &rdev->flags)) {
2732                         md_super_write(mddev,rdev,
2733                                        rdev->sb_start, rdev->sb_size,
2734                                        rdev->sb_page);
2735                         pr_debug("md: (write) %pg's sb offset: %llu\n",
2736                                  rdev->bdev,
2737                                  (unsigned long long)rdev->sb_start);
2738                         rdev->sb_events = mddev->events;
2739                         if (rdev->badblocks.size) {
2740                                 md_super_write(mddev, rdev,
2741                                                rdev->badblocks.sector,
2742                                                rdev->badblocks.size << 9,
2743                                                rdev->bb_page);
2744                                 rdev->badblocks.size = 0;
2745                         }
2746
2747                 } else
2748                         pr_debug("md: %pg (skipping faulty)\n",
2749                                  rdev->bdev);
2750
2751                 if (mddev->level == LEVEL_MULTIPATH)
2752                         /* only need to write one superblock... */
2753                         break;
2754         }
2755         if (md_super_wait(mddev) < 0)
2756                 goto rewrite;
2757         /* if there was a failure, MD_SB_CHANGE_DEVS was set, and we re-write super */
2758
2759         if (mddev_is_clustered(mddev) && ret == 0)
2760                 md_cluster_ops->metadata_update_finish(mddev);
2761
2762         if (mddev->in_sync != sync_req ||
2763             !bit_clear_unless(&mddev->sb_flags, BIT(MD_SB_CHANGE_PENDING),
2764                                BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_CLEAN)))
2765                 /* have to write it out again */
2766                 goto repeat;
2767         wake_up(&mddev->sb_wait);
2768         if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
2769                 sysfs_notify_dirent_safe(mddev->sysfs_completed);
2770
2771         rdev_for_each(rdev, mddev) {
2772                 if (test_and_clear_bit(FaultRecorded, &rdev->flags))
2773                         clear_bit(Blocked, &rdev->flags);
2774
2775                 if (any_badblocks_changed)
2776                         ack_all_badblocks(&rdev->badblocks);
2777                 clear_bit(BlockedBadBlocks, &rdev->flags);
2778                 wake_up(&rdev->blocked_wait);
2779         }
2780 }
2781 EXPORT_SYMBOL(md_update_sb);
2782
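/*
 * Editorial example (not part of the driver): a minimal userspace sketch of
 * the event-count decision in md_update_sb() above.  The names below
 * (example_md, example_update_events, EXAMPLE_MAX_SECTOR) are stand-ins
 * invented purely for illustration.
 */
#if 0	/* illustrative userspace sketch, not driver code */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define EXAMPLE_MAX_SECTOR (~(uint64_t)0)	/* stand-in for MaxSector */

struct example_md {
	uint64_t events;
	uint64_t recovery_cp;
	bool in_sync;
	bool can_decrease_events;
};

/* Mirrors the nospares logic above: a pure clean<->dirty flip may reuse the
 * previous event count instead of bumping it, so spare superblocks do not
 * have to be rewritten on every transition.
 */
static void example_update_events(struct example_md *md, bool nospares)
{
	if (nospares && md->in_sync && md->recovery_cp == EXAMPLE_MAX_SECTOR &&
	    md->can_decrease_events && md->events != 1) {
		md->events--;
		md->can_decrease_events = false;
	} else {
		md->events++;
		md->can_decrease_events = nospares;
	}
}

int main(void)
{
	struct example_md md = {
		.events = 100, .recovery_cp = EXAMPLE_MAX_SECTOR,
		.in_sync = true, .can_decrease_events = false,
	};

	example_update_events(&md, true);	/* first flip: 100 -> 101 */
	example_update_events(&md, true);	/* second flip: rolls back to 100 */
	printf("events after a clean/dirty round trip: %llu\n",
	       (unsigned long long)md.events);
	return 0;
}
#endif
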
2783 static int add_bound_rdev(struct md_rdev *rdev)
2784 {
2785         struct mddev *mddev = rdev->mddev;
2786         int err = 0;
2787         bool add_journal = test_bit(Journal, &rdev->flags);
2788
2789         if (!mddev->pers->hot_remove_disk || add_journal) {
2790                 /* If there is hot_add_disk but no hot_remove_disk
2791                  * then added disks are for geometry changes,
2792                  * and should be added immediately.
2793                  */
2794                 super_types[mddev->major_version].
2795                         validate_super(mddev, rdev);
2796                 if (add_journal)
2797                         mddev_suspend(mddev);
2798                 err = mddev->pers->hot_add_disk(mddev, rdev);
2799                 if (add_journal)
2800                         mddev_resume(mddev);
2801                 if (err) {
2802                         md_kick_rdev_from_array(rdev);
2803                         return err;
2804                 }
2805         }
2806         sysfs_notify_dirent_safe(rdev->sysfs_state);
2807
2808         set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
2809         if (mddev->degraded)
2810                 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
2811         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
2812         md_new_event();
2813         md_wakeup_thread(mddev->thread);
2814         return 0;
2815 }
2816
2817 /* words written to sysfs files may, or may not, be \n terminated.
2818  * We want to accept either case. For this we use cmd_match.
2819  */
2820 static int cmd_match(const char *cmd, const char *str)
2821 {
2822         /* See if cmd, written into a sysfs file, matches
2823          * str.  They must either be the same, or cmd can
2824          * have a trailing newline
2825          */
2826         while (*cmd && *str && *cmd == *str) {
2827                 cmd++;
2828                 str++;
2829         }
2830         if (*cmd == '\n')
2831                 cmd++;
2832         if (*str || *cmd)
2833                 return 0;
2834         return 1;
2835 }
2836
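/*
 * Editorial example (not part of the driver): a small userspace harness for
 * the matching rules above.  It assumes cmd_match() is copied verbatim into
 * the test file; only the expected results are shown here.
 */
#if 0	/* illustrative userspace harness, not driver code */
#include <assert.h>
#include <stdio.h>

/* ... cmd_match() copied verbatim from above ... */

int main(void)
{
	assert(cmd_match("faulty\n", "faulty") == 1);	/* trailing \n tolerated */
	assert(cmd_match("faulty", "faulty") == 1);	/* exact match */
	assert(cmd_match("fault", "faulty") == 0);	/* prefix is not enough */
	assert(cmd_match("faulty x", "faulty") == 0);	/* trailing junk rejected */
	printf("cmd_match examples hold\n");
	return 0;
}
#endif
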
2837 struct rdev_sysfs_entry {
2838         struct attribute attr;
2839         ssize_t (*show)(struct md_rdev *, char *);
2840         ssize_t (*store)(struct md_rdev *, const char *, size_t);
2841 };
2842
2843 static ssize_t
2844 state_show(struct md_rdev *rdev, char *page)
2845 {
2846         char *sep = ",";
2847         size_t len = 0;
2848         unsigned long flags = READ_ONCE(rdev->flags);
2849
2850         if (test_bit(Faulty, &flags) ||
2851             (!test_bit(ExternalBbl, &flags) &&
2852             rdev->badblocks.unacked_exist))
2853                 len += sprintf(page+len, "faulty%s", sep);
2854         if (test_bit(In_sync, &flags))
2855                 len += sprintf(page+len, "in_sync%s", sep);
2856         if (test_bit(Journal, &flags))
2857                 len += sprintf(page+len, "journal%s", sep);
2858         if (test_bit(WriteMostly, &flags))
2859                 len += sprintf(page+len, "write_mostly%s", sep);
2860         if (test_bit(Blocked, &flags) ||
2861             (rdev->badblocks.unacked_exist
2862              && !test_bit(Faulty, &flags)))
2863                 len += sprintf(page+len, "blocked%s", sep);
2864         if (!test_bit(Faulty, &flags) &&
2865             !test_bit(Journal, &flags) &&
2866             !test_bit(In_sync, &flags))
2867                 len += sprintf(page+len, "spare%s", sep);
2868         if (test_bit(WriteErrorSeen, &flags))
2869                 len += sprintf(page+len, "write_error%s", sep);
2870         if (test_bit(WantReplacement, &flags))
2871                 len += sprintf(page+len, "want_replacement%s", sep);
2872         if (test_bit(Replacement, &flags))
2873                 len += sprintf(page+len, "replacement%s", sep);
2874         if (test_bit(ExternalBbl, &flags))
2875                 len += sprintf(page+len, "external_bbl%s", sep);
2876         if (test_bit(FailFast, &flags))
2877                 len += sprintf(page+len, "failfast%s", sep);
2878
2879         if (len)
2880                 len -= strlen(sep);
2881
2882         return len+sprintf(page+len, "\n");
2883 }
2884
2885 static ssize_t
2886 state_store(struct md_rdev *rdev, const char *buf, size_t len)
2887 {
2888         /* can write
2889          *  faulty  - simulates an error
2890          *  remove  - disconnects the device
2891          *  writemostly - sets write_mostly
2892          *  -writemostly - clears write_mostly
2893          *  blocked - sets the Blocked flag
2894          *  -blocked - clears the Blocked flag and possibly simulates an error
2895          *  insync - sets In_sync provided the device isn't active
2896          *  -insync - clears In_sync for a device with a slot assigned,
2897          *            so that it gets rebuilt based on the bitmap
2898          *  write_error - sets WriteErrorSeen
2899          *  -write_error - clears WriteErrorSeen
2900          *  {,-}failfast - set/clear FailFast
2901          */
2902
2903         struct mddev *mddev = rdev->mddev;
2904         int err = -EINVAL;
2905         bool need_update_sb = false;
2906
2907         if (cmd_match(buf, "faulty") && rdev->mddev->pers) {
2908                 md_error(rdev->mddev, rdev);
2909
2910                 if (test_bit(MD_BROKEN, &rdev->mddev->flags))
2911                         err = -EBUSY;
2912                 else
2913                         err = 0;
2914         } else if (cmd_match(buf, "remove")) {
2915                 if (rdev->mddev->pers) {
2916                         clear_bit(Blocked, &rdev->flags);
2917                         remove_and_add_spares(rdev->mddev, rdev);
2918                 }
2919                 if (rdev->raid_disk >= 0)
2920                         err = -EBUSY;
2921                 else {
2922                         err = 0;
2923                         if (mddev_is_clustered(mddev))
2924                                 err = md_cluster_ops->remove_disk(mddev, rdev);
2925
2926                         if (err == 0) {
2927                                 md_kick_rdev_from_array(rdev);
2928                                 if (mddev->pers) {
2929                                         set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
2930                                         md_wakeup_thread(mddev->thread);
2931                                 }
2932                                 md_new_event();
2933                         }
2934                 }
2935         } else if (cmd_match(buf, "writemostly")) {
2936                 set_bit(WriteMostly, &rdev->flags);
2937                 mddev_create_serial_pool(rdev->mddev, rdev, false);
2938                 need_update_sb = true;
2939                 err = 0;
2940         } else if (cmd_match(buf, "-writemostly")) {
2941                 mddev_destroy_serial_pool(rdev->mddev, rdev, false);
2942                 clear_bit(WriteMostly, &rdev->flags);
2943                 need_update_sb = true;
2944                 err = 0;
2945         } else if (cmd_match(buf, "blocked")) {
2946                 set_bit(Blocked, &rdev->flags);
2947                 err = 0;
2948         } else if (cmd_match(buf, "-blocked")) {
2949                 if (!test_bit(Faulty, &rdev->flags) &&
2950                     !test_bit(ExternalBbl, &rdev->flags) &&
2951                     rdev->badblocks.unacked_exist) {
2952                         /* metadata handler doesn't understand badblocks,
2953                          * so we need to fail the device
2954                          */
2955                         md_error(rdev->mddev, rdev);
2956                 }
2957                 clear_bit(Blocked, &rdev->flags);
2958                 clear_bit(BlockedBadBlocks, &rdev->flags);
2959                 wake_up(&rdev->blocked_wait);
2960                 set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
2961                 md_wakeup_thread(rdev->mddev->thread);
2962
2963                 err = 0;
2964         } else if (cmd_match(buf, "insync") && rdev->raid_disk == -1) {
2965                 set_bit(In_sync, &rdev->flags);
2966                 err = 0;
2967         } else if (cmd_match(buf, "failfast")) {
2968                 set_bit(FailFast, &rdev->flags);
2969                 need_update_sb = true;
2970                 err = 0;
2971         } else if (cmd_match(buf, "-failfast")) {
2972                 clear_bit(FailFast, &rdev->flags);
2973                 need_update_sb = true;
2974                 err = 0;
2975         } else if (cmd_match(buf, "-insync") && rdev->raid_disk >= 0 &&
2976                    !test_bit(Journal, &rdev->flags)) {
2977                 if (rdev->mddev->pers == NULL) {
2978                         clear_bit(In_sync, &rdev->flags);
2979                         rdev->saved_raid_disk = rdev->raid_disk;
2980                         rdev->raid_disk = -1;
2981                         err = 0;
2982                 }
2983         } else if (cmd_match(buf, "write_error")) {
2984                 set_bit(WriteErrorSeen, &rdev->flags);
2985                 err = 0;
2986         } else if (cmd_match(buf, "-write_error")) {
2987                 clear_bit(WriteErrorSeen, &rdev->flags);
2988                 err = 0;
2989         } else if (cmd_match(buf, "want_replacement")) {
2990                 /* Any non-spare device that is not a replacement can
2991                  * become want_replacement at any time, but we then need to
2992                  * check if recovery is needed.
2993                  */
2994                 if (rdev->raid_disk >= 0 &&
2995                     !test_bit(Journal, &rdev->flags) &&
2996                     !test_bit(Replacement, &rdev->flags))
2997                         set_bit(WantReplacement, &rdev->flags);
2998                 set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
2999                 md_wakeup_thread(rdev->mddev->thread);
3000                 err = 0;
3001         } else if (cmd_match(buf, "-want_replacement")) {
3002                 /* Clearing 'want_replacement' is always allowed.
3003                  * Once replacement starts it is too late, though.
3004                  */
3005                 err = 0;
3006                 clear_bit(WantReplacement, &rdev->flags);
3007         } else if (cmd_match(buf, "replacement")) {
3008                 /* Can only set a device as a replacement when array has not
3009                  * yet been started.  Once running, replacement is automatic
3010                  * from spares, or by assigning 'slot'.
3011                  */
3012                 if (rdev->mddev->pers)
3013                         err = -EBUSY;
3014                 else {
3015                         set_bit(Replacement, &rdev->flags);
3016                         err = 0;
3017                 }
3018         } else if (cmd_match(buf, "-replacement")) {
3019                 /* Similarly, can only clear Replacement before start */
3020                 if (rdev->mddev->pers)
3021                         err = -EBUSY;
3022                 else {
3023                         clear_bit(Replacement, &rdev->flags);
3024                         err = 0;
3025                 }
3026         } else if (cmd_match(buf, "re-add")) {
3027                 if (!rdev->mddev->pers)
3028                         err = -EINVAL;
3029                 else if (test_bit(Faulty, &rdev->flags) && (rdev->raid_disk == -1) &&
3030                                 rdev->saved_raid_disk >= 0) {
3031                         /* clear_bit is performed _after_ all the devices
3032                          * have their local Faulty bit cleared. If any writes
3033                          * happen in the meantime in the local node, they
3034                          * will land in the local bitmap, which will be synced
3035                          * by this node eventually
3036                          */
3037                         if (!mddev_is_clustered(rdev->mddev) ||
3038                             (err = md_cluster_ops->gather_bitmaps(rdev)) == 0) {
3039                                 clear_bit(Faulty, &rdev->flags);
3040                                 err = add_bound_rdev(rdev);
3041                         }
3042                 } else
3043                         err = -EBUSY;
3044         } else if (cmd_match(buf, "external_bbl") && (rdev->mddev->external)) {
3045                 set_bit(ExternalBbl, &rdev->flags);
3046                 rdev->badblocks.shift = 0;
3047                 err = 0;
3048         } else if (cmd_match(buf, "-external_bbl") && (rdev->mddev->external)) {
3049                 clear_bit(ExternalBbl, &rdev->flags);
3050                 err = 0;
3051         }
3052         if (need_update_sb)
3053                 md_update_sb(mddev, 1);
3054         if (!err)
3055                 sysfs_notify_dirent_safe(rdev->sysfs_state);
3056         return err ? err : len;
3057 }
3058 static struct rdev_sysfs_entry rdev_state =
3059 __ATTR_PREALLOC(state, S_IRUGO|S_IWUSR, state_show, state_store);
3060
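/*
 * Editorial example (not part of the driver): setting one of the keywords
 * accepted by state_store() from userspace.  The sysfs path below is only
 * illustrative (array md0, member device sda1).
 */
#if 0	/* illustrative userspace usage, not driver code */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* mark md0's member sda1 as write-mostly */
	int fd = open("/sys/block/md0/md/dev-sda1/state", O_WRONLY);

	if (fd < 0)
		return 1;
	if (write(fd, "writemostly", 11) != 11)
		perror("write");
	close(fd);
	return 0;
}
#endif
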
3061 static ssize_t
3062 errors_show(struct md_rdev *rdev, char *page)
3063 {
3064         return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
3065 }
3066
3067 static ssize_t
3068 errors_store(struct md_rdev *rdev, const char *buf, size_t len)
3069 {
3070         unsigned int n;
3071         int rv;
3072
3073         rv = kstrtouint(buf, 10, &n);
3074         if (rv < 0)
3075                 return rv;
3076         atomic_set(&rdev->corrected_errors, n);
3077         return len;
3078 }
3079 static struct rdev_sysfs_entry rdev_errors =
3080 __ATTR(errors, S_IRUGO|S_IWUSR, errors_show, errors_store);
3081
3082 static ssize_t
3083 slot_show(struct md_rdev *rdev, char *page)
3084 {
3085         if (test_bit(Journal, &rdev->flags))
3086                 return sprintf(page, "journal\n");
3087         else if (rdev->raid_disk < 0)
3088                 return sprintf(page, "none\n");
3089         else
3090                 return sprintf(page, "%d\n", rdev->raid_disk);
3091 }
3092
3093 static ssize_t
3094 slot_store(struct md_rdev *rdev, const char *buf, size_t len)
3095 {
3096         int slot;
3097         int err;
3098
3099         if (test_bit(Journal, &rdev->flags))
3100                 return -EBUSY;
3101         if (strncmp(buf, "none", 4)==0)
3102                 slot = -1;
3103         else {
3104                 err = kstrtouint(buf, 10, (unsigned int *)&slot);
3105                 if (err < 0)
3106                         return err;
3107                 if (slot < 0)
3108                         /* overflow */
3109                         return -ENOSPC;
3110         }
3111         if (rdev->mddev->pers && slot == -1) {
3112                 /* Setting 'slot' on an active array requires also
3113                  * updating the 'rd%d' link, and communicating
3114                  * with the personality with ->hot_*_disk.
3115                  * For now we only support removing
3116                  * failed/spare devices.  This normally happens automatically,
3117                  * but not when the metadata is externally managed.
3118                  */
3119                 if (rdev->raid_disk == -1)
3120                         return -EEXIST;
3121                 /* personality does all needed checks */
3122                 if (rdev->mddev->pers->hot_remove_disk == NULL)
3123                         return -EINVAL;
3124                 clear_bit(Blocked, &rdev->flags);
3125                 remove_and_add_spares(rdev->mddev, rdev);
3126                 if (rdev->raid_disk >= 0)
3127                         return -EBUSY;
3128                 set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
3129                 md_wakeup_thread(rdev->mddev->thread);
3130         } else if (rdev->mddev->pers) {
3131                 /* Activating a spare, or possibly reactivating
3132                  * if we ever get bitmaps working here.
3133                  */
3134                 int err;
3135
3136                 if (rdev->raid_disk != -1)
3137                         return -EBUSY;
3138
3139                 if (test_bit(MD_RECOVERY_RUNNING, &rdev->mddev->recovery))
3140                         return -EBUSY;
3141
3142                 if (rdev->mddev->pers->hot_add_disk == NULL)
3143                         return -EINVAL;
3144
3145                 if (slot >= rdev->mddev->raid_disks &&
3146                     slot >= rdev->mddev->raid_disks + rdev->mddev->delta_disks)
3147                         return -ENOSPC;
3148
3149                 rdev->raid_disk = slot;
3150                 if (test_bit(In_sync, &rdev->flags))
3151                         rdev->saved_raid_disk = slot;
3152                 else
3153                         rdev->saved_raid_disk = -1;
3154                 clear_bit(In_sync, &rdev->flags);
3155                 clear_bit(Bitmap_sync, &rdev->flags);
3156                 err = rdev->mddev->pers->hot_add_disk(rdev->mddev, rdev);
3157                 if (err) {
3158                         rdev->raid_disk = -1;
3159                         return err;
3160                 } else
3161                         sysfs_notify_dirent_safe(rdev->sysfs_state);
3162                 /* failure here is OK */
3163                 sysfs_link_rdev(rdev->mddev, rdev);
3164                 /* don't wakeup anyone, leave that to userspace. */
3165         } else {
3166                 if (slot >= rdev->mddev->raid_disks &&
3167                     slot >= rdev->mddev->raid_disks + rdev->mddev->delta_disks)
3168                         return -ENOSPC;
3169                 rdev->raid_disk = slot;
3170                 /* assume it is working */
3171                 clear_bit(Faulty, &rdev->flags);
3172                 clear_bit(WriteMostly, &rdev->flags);
3173                 set_bit(In_sync, &rdev->flags);
3174                 sysfs_notify_dirent_safe(rdev->sysfs_state);
3175         }
3176         return len;
3177 }
3178
3179 static struct rdev_sysfs_entry rdev_slot =
3180 __ATTR(slot, S_IRUGO|S_IWUSR, slot_show, slot_store);
3181
3182 static ssize_t
3183 offset_show(struct md_rdev *rdev, char *page)
3184 {
3185         return sprintf(page, "%llu\n", (unsigned long long)rdev->data_offset);
3186 }
3187
3188 static ssize_t
3189 offset_store(struct md_rdev *rdev, const char *buf, size_t len)
3190 {
3191         unsigned long long offset;
3192         if (kstrtoull(buf, 10, &offset) < 0)
3193                 return -EINVAL;
3194         if (rdev->mddev->pers && rdev->raid_disk >= 0)
3195                 return -EBUSY;
3196         if (rdev->sectors && rdev->mddev->external)
3197                 /* Must set offset before size, so overlap checks
3198                  * can be sane */
3199                 return -EBUSY;
3200         rdev->data_offset = offset;
3201         rdev->new_data_offset = offset;
3202         return len;
3203 }
3204
3205 static struct rdev_sysfs_entry rdev_offset =
3206 __ATTR(offset, S_IRUGO|S_IWUSR, offset_show, offset_store);
3207
3208 static ssize_t new_offset_show(struct md_rdev *rdev, char *page)
3209 {
3210         return sprintf(page, "%llu\n",
3211                        (unsigned long long)rdev->new_data_offset);
3212 }
3213
3214 static ssize_t new_offset_store(struct md_rdev *rdev,
3215                                 const char *buf, size_t len)
3216 {
3217         unsigned long long new_offset;
3218         struct mddev *mddev = rdev->mddev;
3219
3220         if (kstrtoull(buf, 10, &new_offset) < 0)
3221                 return -EINVAL;
3222
3223         if (mddev->sync_thread ||
3224             test_bit(MD_RECOVERY_RUNNING,&mddev->recovery))
3225                 return -EBUSY;
3226         if (new_offset == rdev->data_offset)
3227                 /* reset is always permitted */
3228                 ;
3229         else if (new_offset > rdev->data_offset) {
3230                 /* must not push array size beyond rdev_sectors */
3231                 if (new_offset - rdev->data_offset
3232                     + mddev->dev_sectors > rdev->sectors)
3233                                 return -E2BIG;
3234         }
3235         /* Metadata worries about other space details. */
3236
3237         /* decreasing the offset is inconsistent with a backwards
3238          * reshape.
3239          */
3240         if (new_offset < rdev->data_offset &&
3241             mddev->reshape_backwards)
3242                 return -EINVAL;
3243         /* Increasing offset is inconsistent with forwards
3244          * reshape.  reshape_direction should be set to
3245          * 'backwards' first.
3246          */
3247         if (new_offset > rdev->data_offset &&
3248             !mddev->reshape_backwards)
3249                 return -EINVAL;
3250
3251         if (mddev->pers && mddev->persistent &&
3252             !super_types[mddev->major_version]
3253             .allow_new_offset(rdev, new_offset))
3254                 return -E2BIG;
3255         rdev->new_data_offset = new_offset;
3256         if (new_offset > rdev->data_offset)
3257                 mddev->reshape_backwards = 1;
3258         else if (new_offset < rdev->data_offset)
3259                 mddev->reshape_backwards = 0;
3260
3261         return len;
3262 }
3263 static struct rdev_sysfs_entry rdev_new_offset =
3264 __ATTR(new_offset, S_IRUGO|S_IWUSR, new_offset_show, new_offset_store);
3265
3266 static ssize_t
3267 rdev_size_show(struct md_rdev *rdev, char *page)
3268 {
3269         return sprintf(page, "%llu\n", (unsigned long long)rdev->sectors / 2);
3270 }
3271
3272 static int md_rdevs_overlap(struct md_rdev *a, struct md_rdev *b)
3273 {
3274         /* check if two start/length pairs overlap */
3275         if (a->data_offset + a->sectors <= b->data_offset)
3276                 return false;
3277         if (b->data_offset + b->sectors <= a->data_offset)
3278                 return false;
3279         return true;
3280 }
3281
3282 static bool md_rdev_overlaps(struct md_rdev *rdev)
3283 {
3284         struct mddev *mddev;
3285         struct md_rdev *rdev2;
3286
3287         spin_lock(&all_mddevs_lock);
3288         list_for_each_entry(mddev, &all_mddevs, all_mddevs) {
3289                 if (test_bit(MD_DELETED, &mddev->flags))
3290                         continue;
3291                 rdev_for_each(rdev2, mddev) {
3292                         if (rdev != rdev2 && rdev->bdev == rdev2->bdev &&
3293                             md_rdevs_overlap(rdev, rdev2)) {
3294                                 spin_unlock(&all_mddevs_lock);
3295                                 return true;
3296                         }
3297                 }
3298         }
3299         spin_unlock(&all_mddevs_lock);
3300         return false;
3301 }
3302
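/*
 * Editorial example (not part of the driver): the overlap test above is a
 * plain half-open interval check on [data_offset, data_offset + sectors).
 * The example_rdev type and values below are invented for illustration.
 */
#if 0	/* illustrative userspace sketch, not driver code */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct example_rdev {		/* only the two fields the check needs */
	uint64_t data_offset;
	uint64_t sectors;
};

static bool example_overlap(const struct example_rdev *a,
			    const struct example_rdev *b)
{
	if (a->data_offset + a->sectors <= b->data_offset)
		return false;
	if (b->data_offset + b->sectors <= a->data_offset)
		return false;
	return true;
}

int main(void)
{
	struct example_rdev a = { .data_offset = 0,    .sectors = 2048 };
	struct example_rdev b = { .data_offset = 2048, .sectors = 2048 };
	struct example_rdev c = { .data_offset = 1024, .sectors = 2048 };

	printf("a/b overlap: %d\n", example_overlap(&a, &b));	/* 0: back to back */
	printf("a/c overlap: %d\n", example_overlap(&a, &c));	/* 1: c starts inside a */
	return 0;
}
#endif
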
3303 static int strict_blocks_to_sectors(const char *buf, sector_t *sectors)
3304 {
3305         unsigned long long blocks;
3306         sector_t new;
3307
3308         if (kstrtoull(buf, 10, &blocks) < 0)
3309                 return -EINVAL;
3310
3311         if (blocks & 1ULL << (8 * sizeof(blocks) - 1))
3312                 return -EINVAL; /* sector conversion overflow */
3313
3314         new = blocks * 2;
3315         if (new != blocks * 2)
3316                 return -EINVAL; /* unsigned long long to sector_t overflow */
3317
3318         *sectors = new;
3319         return 0;
3320 }
3321
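/*
 * Editorial example (not part of the driver): strict_blocks_to_sectors()
 * treats its input as 1 KiB blocks and doubles it into 512-byte sectors,
 * refusing values whose top bit is set so the doubling cannot overflow.
 * The helper below is an invented userspace stand-in for illustration.
 */
#if 0	/* illustrative userspace sketch, not driver code */
#include <stdint.h>
#include <stdio.h>

static int example_blocks_to_sectors(unsigned long long blocks,
				     uint64_t *sectors)
{
	if (blocks & 1ULL << (8 * sizeof(blocks) - 1))
		return -1;		/* doubling would overflow */
	*sectors = blocks * 2;
	return 0;
}

int main(void)
{
	uint64_t sectors;

	if (example_blocks_to_sectors(4, &sectors) == 0)
		printf("4 KiB blocks = %llu sectors\n",
		       (unsigned long long)sectors);	/* prints 8 */
	return 0;
}
#endif
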
3322 static ssize_t
3323 rdev_size_store(struct md_rdev *rdev, const char *buf, size_t len)
3324 {
3325         struct mddev *my_mddev = rdev->mddev;
3326         sector_t oldsectors = rdev->sectors;
3327         sector_t sectors;
3328
3329         if (test_bit(Journal, &rdev->flags))
3330                 return -EBUSY;
3331         if (strict_blocks_to_sectors(buf, &sectors) < 0)
3332                 return -EINVAL;
3333         if (rdev->data_offset != rdev->new_data_offset)
3334                 return -EINVAL; /* too confusing */
3335         if (my_mddev->pers && rdev->raid_disk >= 0) {
3336                 if (my_mddev->persistent) {
3337                         sectors = super_types[my_mddev->major_version].
3338                                 rdev_size_change(rdev, sectors);
3339                         if (!sectors)
3340                                 return -EBUSY;
3341                 } else if (!sectors)
3342                         sectors = bdev_nr_sectors(rdev->bdev) -
3343                                 rdev->data_offset;
3344                 if (!my_mddev->pers->resize)
3345                         /* Cannot change size for RAID0 or Linear etc */
3346                         return -EINVAL;
3347         }
3348         if (sectors < my_mddev->dev_sectors)
3349                 return -EINVAL; /* component must fit device */
3350
3351         rdev->sectors = sectors;
3352
3353         /*
3354          * Check that all other rdevs with the same bdev do not overlap.  This
3355          * check does not provide a hard guarantee, it just helps avoid
3356          * dangerous mistakes.
3357          */
3358         if (sectors > oldsectors && my_mddev->external &&
3359             md_rdev_overlaps(rdev)) {
3360                 /*
3361                  * Someone else could have slipped in a size change here, but
3362                  * doing so is just silly.  We put oldsectors back because we
3363                  * know it is safe, and trust userspace not to race with itself.
3364                  */
3365                 rdev->sectors = oldsectors;
3366                 return -EBUSY;
3367         }
3368         return len;
3369 }
3370
3371 static struct rdev_sysfs_entry rdev_size =
3372 __ATTR(size, S_IRUGO|S_IWUSR, rdev_size_show, rdev_size_store);
3373
3374 static ssize_t recovery_start_show(struct md_rdev *rdev, char *page)
3375 {
3376         unsigned long long recovery_start = rdev->recovery_offset;
3377
3378         if (test_bit(In_sync, &rdev->flags) ||
3379             recovery_start == MaxSector)
3380                 return sprintf(page, "none\n");
3381
3382         return sprintf(page, "%llu\n", recovery_start);
3383 }
3384
3385 static ssize_t recovery_start_store(struct md_rdev *rdev, const char *buf, size_t len)
3386 {
3387         unsigned long long recovery_start;
3388
3389         if (cmd_match(buf, "none"))
3390                 recovery_start = MaxSector;
3391         else if (kstrtoull(buf, 10, &recovery_start))
3392                 return -EINVAL;
3393
3394         if (rdev->mddev->pers &&
3395             rdev->raid_disk >= 0)
3396                 return -EBUSY;
3397
3398         rdev->recovery_offset = recovery_start;
3399         if (recovery_start == MaxSector)
3400                 set_bit(In_sync, &rdev->flags);
3401         else
3402                 clear_bit(In_sync, &rdev->flags);
3403         return len;
3404 }
3405
3406 static struct rdev_sysfs_entry rdev_recovery_start =
3407 __ATTR(recovery_start, S_IRUGO|S_IWUSR, recovery_start_show, recovery_start_store);
3408
3409 /* sysfs access to the bad-blocks list.
3410  * We present two files.
3411  * 'bad_blocks' lists sector numbers and lengths of ranges that
3412  *    are recorded as bad.  The list is truncated to fit within
3413  *    the one-page limit of sysfs.
3414  *    Writing "sector length" to this file adds an acknowledged
3415  *    bad block range.
3416  * 'unacknowledged_bad_blocks' lists bad blocks that have not yet
3417  *    been acknowledged.  Writing to this file adds bad blocks
3418  *    without acknowledging them.  This is largely for testing.
3419  */
3420 static ssize_t bb_show(struct md_rdev *rdev, char *page)
3421 {
3422         return badblocks_show(&rdev->badblocks, page, 0);
3423 }
3424 static ssize_t bb_store(struct md_rdev *rdev, const char *page, size_t len)
3425 {
3426         int rv = badblocks_store(&rdev->badblocks, page, len, 0);
3427         /* Maybe that ack was all we needed */
3428         if (test_and_clear_bit(BlockedBadBlocks, &rdev->flags))
3429                 wake_up(&rdev->blocked_wait);
3430         return rv;
3431 }
3432 static struct rdev_sysfs_entry rdev_bad_blocks =
3433 __ATTR(bad_blocks, S_IRUGO|S_IWUSR, bb_show, bb_store);
3434
3435 static ssize_t ubb_show(struct md_rdev *rdev, char *page)
3436 {
3437         return badblocks_show(&rdev->badblocks, page, 1);
3438 }
3439 static ssize_t ubb_store(struct md_rdev *rdev, const char *page, size_t len)
3440 {
3441         return badblocks_store(&rdev->badblocks, page, len, 1);
3442 }
3443 static struct rdev_sysfs_entry rdev_unack_bad_blocks =
3444 __ATTR(unacknowledged_bad_blocks, S_IRUGO|S_IWUSR, ubb_show, ubb_store);
3445
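/*
 * Editorial example (not part of the driver): recording an acknowledged bad
 * range from userspace by writing "sector length" to a member's bad_blocks
 * file, as described in the comment above.  The path is only illustrative.
 */
#if 0	/* illustrative userspace usage, not driver code */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	/* record 8 bad sectors starting at sector 4096 on md0's member sda1 */
	const char *range = "4096 8";
	int fd = open("/sys/block/md0/md/dev-sda1/bad_blocks", O_WRONLY);

	if (fd < 0)
		return 1;
	if (write(fd, range, strlen(range)) < 0)
		perror("write");
	close(fd);
	return 0;
}
#endif
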
3446 static ssize_t
3447 ppl_sector_show(struct md_rdev *rdev, char *page)
3448 {
3449         return sprintf(page, "%llu\n", (unsigned long long)rdev->ppl.sector);
3450 }
3451
3452 static ssize_t
3453 ppl_sector_store(struct md_rdev *rdev, const char *buf, size_t len)
3454 {
3455         unsigned long long sector;
3456
3457         if (kstrtoull(buf, 10, &sector) < 0)
3458                 return -EINVAL;
3459         if (sector != (sector_t)sector)
3460                 return -EINVAL;
3461
3462         if (rdev->mddev->pers && test_bit(MD_HAS_PPL, &rdev->mddev->flags) &&
3463             rdev->raid_disk >= 0)
3464                 return -EBUSY;
3465
3466         if (rdev->mddev->persistent) {
3467                 if (rdev->mddev->major_version == 0)
3468                         return -EINVAL;
3469                 if ((sector > rdev->sb_start &&
3470                      sector - rdev->sb_start > S16_MAX) ||
3471                     (sector < rdev->sb_start &&
3472                      rdev->sb_start - sector > -S16_MIN))
3473                         return -EINVAL;
3474                 rdev->ppl.offset = sector - rdev->sb_start;
3475         } else if (!rdev->mddev->external) {
3476                 return -EBUSY;
3477         }
3478         rdev->ppl.sector = sector;
3479         return len;
3480 }
3481
3482 static struct rdev_sysfs_entry rdev_ppl_sector =
3483 __ATTR(ppl_sector, S_IRUGO|S_IWUSR, ppl_sector_show, ppl_sector_store);
3484
3485 static ssize_t
3486 ppl_size_show(struct md_rdev *rdev, char *page)
3487 {
3488         return sprintf(page, "%u\n", rdev->ppl.size);
3489 }
3490
3491 static ssize_t
3492 ppl_size_store(struct md_rdev *rdev, const char *buf, size_t len)
3493 {
3494         unsigned int size;
3495
3496         if (kstrtouint(buf, 10, &size) < 0)
3497                 return -EINVAL;
3498
3499         if (rdev->mddev->pers && test_bit(MD_HAS_PPL, &rdev->mddev->flags) &&
3500             rdev->raid_disk >= 0)
3501                 return -EBUSY;
3502
3503         if (rdev->mddev->persistent) {
3504                 if (rdev->mddev->major_version == 0)
3505                         return -EINVAL;
3506                 if (size > U16_MAX)
3507                         return -EINVAL;
3508         } else if (!rdev->mddev->external) {
3509                 return -EBUSY;
3510         }
3511         rdev->ppl.size = size;
3512         return len;
3513 }
3514
3515 static struct rdev_sysfs_entry rdev_ppl_size =
3516 __ATTR(ppl_size, S_IRUGO|S_IWUSR, ppl_size_show, ppl_size_store);
3517
3518 static struct attribute *rdev_default_attrs[] = {
3519         &rdev_state.attr,
3520         &rdev_errors.attr,
3521         &rdev_slot.attr,
3522         &rdev_offset.attr,
3523         &rdev_new_offset.attr,
3524         &rdev_size.attr,
3525         &rdev_recovery_start.attr,
3526         &rdev_bad_blocks.attr,
3527         &rdev_unack_bad_blocks.attr,
3528         &rdev_ppl_sector.attr,
3529         &rdev_ppl_size.attr,
3530         NULL,
3531 };
3532 ATTRIBUTE_GROUPS(rdev_default);
3533 static ssize_t
3534 rdev_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
3535 {
3536         struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr);
3537         struct md_rdev *rdev = container_of(kobj, struct md_rdev, kobj);
3538
3539         if (!entry->show)
3540                 return -EIO;
3541         if (!rdev->mddev)
3542                 return -ENODEV;
3543         return entry->show(rdev, page);
3544 }
3545
3546 static ssize_t
3547 rdev_attr_store(struct kobject *kobj, struct attribute *attr,
3548               const char *page, size_t length)
3549 {
3550         struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr);
3551         struct md_rdev *rdev = container_of(kobj, struct md_rdev, kobj);
3552         struct kernfs_node *kn = NULL;
3553         ssize_t rv;
3554         struct mddev *mddev = rdev->mddev;
3555
3556         if (!entry->store)
3557                 return -EIO;
3558         if (!capable(CAP_SYS_ADMIN))
3559                 return -EACCES;
3560
3561         if (entry->store == state_store && cmd_match(page, "remove"))
3562                 kn = sysfs_break_active_protection(kobj, attr);
3563
3564         rv = mddev ? mddev_lock(mddev) : -ENODEV;
3565         if (!rv) {
3566                 if (rdev->mddev == NULL)
3567                         rv = -ENODEV;
3568                 else
3569                         rv = entry->store(rdev, page, length);
3570                 mddev_unlock(mddev);
3571         }
3572
3573         if (kn)
3574                 sysfs_unbreak_active_protection(kn);
3575
3576         return rv;
3577 }
3578
3579 static void rdev_free(struct kobject *ko)
3580 {
3581         struct md_rdev *rdev = container_of(ko, struct md_rdev, kobj);
3582         kfree(rdev);
3583 }
3584 static const struct sysfs_ops rdev_sysfs_ops = {
3585         .show           = rdev_attr_show,
3586         .store          = rdev_attr_store,
3587 };
3588 static const struct kobj_type rdev_ktype = {
3589         .release        = rdev_free,
3590         .sysfs_ops      = &rdev_sysfs_ops,
3591         .default_groups = rdev_default_groups,
3592 };
3593
3594 int md_rdev_init(struct md_rdev *rdev)
3595 {
3596         rdev->desc_nr = -1;
3597         rdev->saved_raid_disk = -1;
3598         rdev->raid_disk = -1;
3599         rdev->flags = 0;
3600         rdev->data_offset = 0;
3601         rdev->new_data_offset = 0;
3602         rdev->sb_events = 0;
3603         rdev->last_read_error = 0;
3604         rdev->sb_loaded = 0;
3605         rdev->bb_page = NULL;
3606         atomic_set(&rdev->nr_pending, 0);
3607         atomic_set(&rdev->read_errors, 0);
3608         atomic_set(&rdev->corrected_errors, 0);
3609
3610         INIT_LIST_HEAD(&rdev->same_set);
3611         init_waitqueue_head(&rdev->blocked_wait);
3612
3613         /* Add space to store bad block list.
3614          * This reserves the space even on arrays where it cannot
3615          * be used - I wonder if that matters
3616          */
3617         return badblocks_init(&rdev->badblocks, 0);
3618 }
3619 EXPORT_SYMBOL_GPL(md_rdev_init);
3620
3621 /*
3622  * Import a device. If 'super_format' >= 0, then sanity check the superblock
3623  *
3624  * mark the device faulty if:
3625  *
3626  *   - the device is nonexistent (zero size)
3627  *   - the device has no valid superblock
3628  *
3629  * a faulty rdev _never_ has rdev->sb set.
3630  */
3631 static struct md_rdev *md_import_device(dev_t newdev, int super_format, int super_minor)
3632 {
3633         struct md_rdev *rdev;
3634         sector_t size;
3635         int err;
3636
3637         rdev = kzalloc(sizeof(*rdev), GFP_KERNEL);
3638         if (!rdev)
3639                 return ERR_PTR(-ENOMEM);
3640
3641         err = md_rdev_init(rdev);
3642         if (err)
3643                 goto out_free_rdev;
3644         err = alloc_disk_sb(rdev);
3645         if (err)
3646                 goto out_clear_rdev;
3647
3648         rdev->bdev = blkdev_get_by_dev(newdev, BLK_OPEN_READ | BLK_OPEN_WRITE,
3649                         super_format == -2 ? &claim_rdev : rdev, NULL);
3650         if (IS_ERR(rdev->bdev)) {
3651                 pr_warn("md: could not open device unknown-block(%u,%u).\n",
3652                         MAJOR(newdev), MINOR(newdev));
3653                 err = PTR_ERR(rdev->bdev);
3654                 goto out_clear_rdev;
3655         }
3656
3657         kobject_init(&rdev->kobj, &rdev_ktype);
3658
3659         size = bdev_nr_bytes(rdev->bdev) >> BLOCK_SIZE_BITS;
3660         if (!size) {
3661                 pr_warn("md: %pg has zero or unknown size, marking faulty!\n",
3662                         rdev->bdev);
3663                 err = -EINVAL;
3664                 goto out_blkdev_put;
3665         }
3666
3667         if (super_format >= 0) {
3668                 err = super_types[super_format].
3669                         load_super(rdev, NULL, super_minor);
3670                 if (err == -EINVAL) {
3671                         pr_warn("md: %pg does not have a valid v%d.%d superblock, not importing!\n",
3672                                 rdev->bdev,
3673                                 super_format, super_minor);
3674                         goto out_blkdev_put;
3675                 }
3676                 if (err < 0) {
3677                         pr_warn("md: could not read %pg's sb, not importing!\n",
3678                                 rdev->bdev);
3679                         goto out_blkdev_put;
3680                 }
3681         }
3682
3683         return rdev;
3684
3685 out_blkdev_put:
3686         blkdev_put(rdev->bdev, super_format == -2 ? &claim_rdev : rdev);
3687 out_clear_rdev:
3688         md_rdev_clear(rdev);
3689 out_free_rdev:
3690         kfree(rdev);
3691         return ERR_PTR(err);
3692 }
3693
3694 /*
3695  * Check a full RAID array for plausibility
3696  */
3697
3698 static int analyze_sbs(struct mddev *mddev)
3699 {
3700         int i;
3701         struct md_rdev *rdev, *freshest, *tmp;
3702
3703         freshest = NULL;
3704         rdev_for_each_safe(rdev, tmp, mddev)
3705                 switch (super_types[mddev->major_version].
3706                         load_super(rdev, freshest, mddev->minor_version)) {
3707                 case 1:
3708                         freshest = rdev;
3709                         break;
3710                 case 0:
3711                         break;
3712                 default:
3713                         pr_warn("md: fatal superblock inconsistency in %pg -- removing from array\n",
3714                                 rdev->bdev);
3715                         md_kick_rdev_from_array(rdev);
3716                 }
3717
3718         /* Cannot find a valid fresh disk */
3719         if (!freshest) {
3720                 pr_warn("md: cannot find a valid disk\n");
3721                 return -EINVAL;
3722         }
3723
3724         super_types[mddev->major_version].
3725                 validate_super(mddev, freshest);
3726
3727         i = 0;
3728         rdev_for_each_safe(rdev, tmp, mddev) {
3729                 if (mddev->max_disks &&
3730                     (rdev->desc_nr >= mddev->max_disks ||
3731                      i > mddev->max_disks)) {
3732                         pr_warn("md: %s: %pg: only %d devices permitted\n",
3733                                 mdname(mddev), rdev->bdev,
3734                                 mddev->max_disks);
3735                         md_kick_rdev_from_array(rdev);
3736                         continue;
3737                 }
3738                 if (rdev != freshest) {
3739                         if (super_types[mddev->major_version].
3740                             validate_super(mddev, rdev)) {
3741                                 pr_warn("md: kicking non-fresh %pg from array!\n",
3742                                         rdev->bdev);
3743                                 md_kick_rdev_from_array(rdev);
3744                                 continue;
3745                         }
3746                 }
3747                 if (mddev->level == LEVEL_MULTIPATH) {
3748                         rdev->desc_nr = i++;
3749                         rdev->raid_disk = rdev->desc_nr;
3750                         set_bit(In_sync, &rdev->flags);
3751                 } else if (rdev->raid_disk >=
3752                             (mddev->raid_disks - min(0, mddev->delta_disks)) &&
3753                            !test_bit(Journal, &rdev->flags)) {
3754                         rdev->raid_disk = -1;
3755                         clear_bit(In_sync, &rdev->flags);
3756                 }
3757         }
3758
3759         return 0;
3760 }
3761
3762 /* Read a fixed-point number.
3763  * Numbers in sysfs attributes should be in "standard" units where
3764  * possible, so time should be in seconds.
3765  * However we internally use a much smaller unit such as
3766  * milliseconds or jiffies.
3767  * This function takes a decimal number with a possible fractional
3768  * component, and produces an integer which is the result of
3769  * multiplying that number by 10^'scale'.
3770  * all without any floating-point arithmetic.
3771  */
3772 int strict_strtoul_scaled(const char *cp, unsigned long *res, int scale)
3773 {
3774         unsigned long result = 0;
3775         long decimals = -1;
3776         while (isdigit(*cp) || (*cp == '.' && decimals < 0)) {
3777                 if (*cp == '.')
3778                         decimals = 0;
3779                 else if (decimals < scale) {
3780                         unsigned int value;
3781                         value = *cp - '0';
3782                         result = result * 10 + value;
3783                         if (decimals >= 0)
3784                                 decimals++;
3785                 }
3786                 cp++;
3787         }
3788         if (*cp == '\n')
3789                 cp++;
3790         if (*cp)
3791                 return -EINVAL;
3792         if (decimals < 0)
3793                 decimals = 0;
3794         *res = result * int_pow(10, scale - decimals);
3795         return 0;
3796 }
3797
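/*
 * Editorial example (not part of the driver): a userspace re-statement of the
 * fixed-point parse above, with a local power helper standing in for the
 * kernel's int_pow().  "1.53" at scale 3 becomes 1530 and "2" becomes 2000,
 * all in integer arithmetic.  The example_* names are invented.
 */
#if 0	/* illustrative userspace sketch, not driver code */
#include <ctype.h>
#include <stdio.h>

static unsigned long example_int_pow(unsigned long base, unsigned int exp)
{
	unsigned long r = 1;

	while (exp--)
		r *= base;
	return r;
}

static int example_strtoul_scaled(const char *cp, unsigned long *res, int scale)
{
	unsigned long result = 0;
	long decimals = -1;

	while (isdigit((unsigned char)*cp) || (*cp == '.' && decimals < 0)) {
		if (*cp == '.')
			decimals = 0;
		else if (decimals < scale) {
			result = result * 10 + (*cp - '0');
			if (decimals >= 0)
				decimals++;
		}
		cp++;
	}
	if (*cp == '\n')
		cp++;
	if (*cp)
		return -1;
	if (decimals < 0)
		decimals = 0;
	*res = result * example_int_pow(10, scale - decimals);
	return 0;
}

int main(void)
{
	unsigned long v;

	if (example_strtoul_scaled("1.53", &v, 3) == 0)
		printf("1.53 scaled by 10^3 = %lu\n", v);	/* 1530 */
	if (example_strtoul_scaled("2", &v, 3) == 0)
		printf("2 scaled by 10^3 = %lu\n", v);		/* 2000 */
	return 0;
}
#endif
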
3798 static ssize_t
3799 safe_delay_show(struct mddev *mddev, char *page)
3800 {
3801         unsigned int msec = ((unsigned long)mddev->safemode_delay*1000)/HZ;
3802
3803         return sprintf(page, "%u.%03u\n", msec/1000, msec%1000);
3804 }
3805 static ssize_t
3806 safe_delay_store(struct mddev *mddev, const char *cbuf, size_t len)
3807 {
3808         unsigned long msec;
3809
3810         if (mddev_is_clustered(mddev)) {
3811                 pr_warn("md: Safemode is disabled for clustered mode\n");
3812                 return -EINVAL;
3813         }
3814
3815         if (strict_strtoul_scaled(cbuf, &msec, 3) < 0 || msec > UINT_MAX / HZ)
3816                 return -EINVAL;
3817         if (msec == 0)
3818                 mddev->safemode_delay = 0;
3819         else {
3820                 unsigned long old_delay = mddev->safemode_delay;
3821                 unsigned long new_delay = (msec*HZ)/1000;
3822
3823                 if (new_delay == 0)
3824                         new_delay = 1;
3825                 mddev->safemode_delay = new_delay;
3826                 if (new_delay < old_delay || old_delay == 0)
3827                         mod_timer(&mddev->safemode_timer, jiffies+1);
3828         }
3829         return len;
3830 }
3831 static struct md_sysfs_entry md_safe_delay =
3832 __ATTR(safe_mode_delay, S_IRUGO|S_IWUSR,safe_delay_show, safe_delay_store);
3833
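/*
 * Editorial example (not part of the driver): safe_mode_delay is written in
 * seconds with an optional fractional part, which safe_delay_store() parses
 * into milliseconds via strict_strtoul_scaled().  The path is illustrative.
 */
#if 0	/* illustrative userspace usage, not driver code */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* set md0's safe-mode delay to 0.200 seconds */
	int fd = open("/sys/block/md0/md/safe_mode_delay", O_WRONLY);

	if (fd < 0)
		return 1;
	if (write(fd, "0.200", 5) != 5)
		perror("write");
	close(fd);
	return 0;
}
#endif
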
3834 static ssize_t
3835 level_show(struct mddev *mddev, char *page)
3836 {
3837         struct md_personality *p;
3838         int ret;
3839         spin_lock(&mddev->lock);
3840         p = mddev->pers;
3841         if (p)
3842                 ret = sprintf(page, "%s\n", p->name);
3843         else if (mddev->clevel[0])
3844                 ret = sprintf(page, "%s\n", mddev->clevel);
3845         else if (mddev->level != LEVEL_NONE)
3846                 ret = sprintf(page, "%d\n", mddev->level);
3847         else
3848                 ret = 0;
3849         spin_unlock(&mddev->lock);
3850         return ret;
3851 }
3852
3853 static ssize_t
3854 level_store(struct mddev *mddev, const char *buf, size_t len)
3855 {
3856         char clevel[16];
3857         ssize_t rv;
3858         size_t slen = len;
3859         struct md_personality *pers, *oldpers;
3860         long level;
3861         void *priv, *oldpriv;
3862         struct md_rdev *rdev;
3863
3864         if (slen == 0 || slen >= sizeof(clevel))
3865                 return -EINVAL;
3866
3867         rv = mddev_lock(mddev);
3868         if (rv)
3869                 return rv;
3870
3871         if (mddev->pers == NULL) {
3872                 strncpy(mddev->clevel, buf, slen);
3873                 if (mddev->clevel[slen-1] == '\n')
3874                         slen--;
3875                 mddev->clevel[slen] = 0;
3876                 mddev->level = LEVEL_NONE;
3877                 rv = len;
3878                 goto out_unlock;
3879         }
3880         rv = -EROFS;
3881         if (!md_is_rdwr(mddev))
3882                 goto out_unlock;
3883
3884         /* request to change the personality.  Need to ensure:
3885          *  - array is not engaged in resync/recovery/reshape
3886          *  - old personality can be suspended
3887          *  - new personality will accept the existing array.
3888          */
3889
3890         rv = -EBUSY;
3891         if (mddev->sync_thread ||
3892             test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
3893             mddev->reshape_position != MaxSector ||
3894             mddev->sysfs_active)
3895                 goto out_unlock;
3896
3897         rv = -EINVAL;
3898         if (!mddev->pers->quiesce) {
3899                 pr_warn("md: %s: %s does not support online personality change\n",
3900                         mdname(mddev), mddev->pers->name);
3901                 goto out_unlock;
3902         }
3903
3904         /* Now find the new personality */
3905         strncpy(clevel, buf, slen);
3906         if (clevel[slen-1] == '\n')
3907                 slen--;
3908         clevel[slen] = 0;
3909         if (kstrtol(clevel, 10, &level))
3910                 level = LEVEL_NONE;
3911
3912         if (request_module("md-%s", clevel) != 0)
3913                 request_module("md-level-%s", clevel);
3914         spin_lock(&pers_lock);
3915         pers = find_pers(level, clevel);
3916         if (!pers || !try_module_get(pers->owner)) {
3917                 spin_unlock(&pers_lock);
3918                 pr_warn("md: personality %s not loaded\n", clevel);
3919                 rv = -EINVAL;
3920                 goto out_unlock;
3921         }
3922         spin_unlock(&pers_lock);
3923
3924         if (pers == mddev->pers) {
3925                 /* Nothing to do! */
3926                 module_put(pers->owner);
3927                 rv = len;
3928                 goto out_unlock;
3929         }
3930         if (!pers->takeover) {
3931                 module_put(pers->owner);
3932                 pr_warn("md: %s: %s does not support personality takeover\n",
3933                         mdname(mddev), clevel);
3934                 rv = -EINVAL;
3935                 goto out_unlock;
3936         }
3937
3938         rdev_for_each(rdev, mddev)
3939                 rdev->new_raid_disk = rdev->raid_disk;
3940
3941         /* ->takeover must set new_* and/or delta_disks
3942          * if it succeeds, and may set them when it fails.
3943          */
3944         priv = pers->takeover(mddev);
3945         if (IS_ERR(priv)) {
3946                 mddev->new_level = mddev->level;
3947                 mddev->new_layout = mddev->layout;
3948                 mddev->new_chunk_sectors = mddev->chunk_sectors;
3949                 mddev->raid_disks -= mddev->delta_disks;
3950                 mddev->delta_disks = 0;
3951                 mddev->reshape_backwards = 0;
3952                 module_put(pers->owner);
3953                 pr_warn("md: %s: %s would not accept array\n",
3954                         mdname(mddev), clevel);
3955                 rv = PTR_ERR(priv);
3956                 goto out_unlock;
3957         }
3958
3959         /* Looks like we have a winner */
3960         mddev_suspend(mddev);
3961         mddev_detach(mddev);
3962
3963         spin_lock(&mddev->lock);
3964         oldpers = mddev->pers;
3965         oldpriv = mddev->private;
3966         mddev->pers = pers;
3967         mddev->private = priv;
3968         strscpy(mddev->clevel, pers->name, sizeof(mddev->clevel));
3969         mddev->level = mddev->new_level;
3970         mddev->layout = mddev->new_layout;
3971         mddev->chunk_sectors = mddev->new_chunk_sectors;
3972         mddev->delta_disks = 0;
3973         mddev->reshape_backwards = 0;
3974         mddev->degraded = 0;
3975         spin_unlock(&mddev->lock);
3976
3977         if (oldpers->sync_request == NULL &&
3978             mddev->external) {
3979                 /* We are converting from a no-redundancy array
3980                  * to a redundancy array and metadata is managed
3981                  * externally so we need to be sure that writes
3982                  * won't block due to a need to transition
3983                  *      clean->dirty
3984                  * until external management is started.
3985                  */
3986                 mddev->in_sync = 0;
3987                 mddev->safemode_delay = 0;
3988                 mddev->safemode = 0;
3989         }
3990
3991         oldpers->free(mddev, oldpriv);
3992
3993         if (oldpers->sync_request == NULL &&
3994             pers->sync_request != NULL) {
3995                 /* need to add the md_redundancy_group */
3996                 if (sysfs_create_group(&mddev->kobj, &md_redundancy_group))
3997                         pr_warn("md: cannot register extra attributes for %s\n",
3998                                 mdname(mddev));
3999                 mddev->sysfs_action = sysfs_get_dirent(mddev->kobj.sd, "sync_action");
4000                 mddev->sysfs_completed = sysfs_get_dirent_safe(mddev->kobj.sd, "sync_completed");
4001                 mddev->sysfs_degraded = sysfs_get_dirent_safe(mddev->kobj.sd, "degraded");
4002         }
4003         if (oldpers->sync_request != NULL &&
4004             pers->sync_request == NULL) {
4005                 /* need to remove the md_redundancy_group */
4006                 if (mddev->to_remove == NULL)
4007                         mddev->to_remove = &md_redundancy_group;
4008         }
4009
4010         module_put(oldpers->owner);
4011
4012         rdev_for_each(rdev, mddev) {
4013                 if (rdev->raid_disk < 0)
4014                         continue;
4015                 if (rdev->new_raid_disk >= mddev->raid_disks)
4016                         rdev->new_raid_disk = -1;
4017                 if (rdev->new_raid_disk == rdev->raid_disk)
4018                         continue;
4019                 sysfs_unlink_rdev(mddev, rdev);
4020         }
4021         rdev_for_each(rdev, mddev) {
4022                 if (rdev->raid_disk < 0)
4023                         continue;
4024                 if (rdev->new_raid_disk == rdev->raid_disk)
4025                         continue;
4026                 rdev->raid_disk = rdev->new_raid_disk;
4027                 if (rdev->raid_disk < 0)
4028                         clear_bit(In_sync, &rdev->flags);
4029                 else {
4030                         if (sysfs_link_rdev(mddev, rdev))
4031                                 pr_warn("md: cannot register rd%d for %s after level change\n",
4032                                         rdev->raid_disk, mdname(mddev));
4033                 }
4034         }
4035
4036         if (pers->sync_request == NULL) {
4037                 /* this is now an array without redundancy, so
4038                  * it must always be in_sync
4039                  */
4040                 mddev->in_sync = 1;
4041                 del_timer_sync(&mddev->safemode_timer);
4042         }
4043         blk_set_stacking_limits(&mddev->queue->limits);
4044         pers->run(mddev);
4045         set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
4046         mddev_resume(mddev);
4047         if (!mddev->thread)
4048                 md_update_sb(mddev, 1);
4049         sysfs_notify_dirent_safe(mddev->sysfs_level);
4050         md_new_event();
4051         rv = len;
4052 out_unlock:
4053         mddev_unlock(mddev);
4054         return rv;
4055 }
4056
4057 static struct md_sysfs_entry md_level =
4058 __ATTR(level, S_IRUGO|S_IWUSR, level_show, level_store);
4059
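/*
 * Editorial example (not part of the driver): an online personality change is
 * requested by writing the new level name to the array's 'level' file, and is
 * rejected while a resync or reshape is running, as enforced in level_store()
 * above.  The path and the raid6 target below are examples only.
 */
#if 0	/* illustrative userspace usage, not driver code */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* ask for md0 to be taken over by the raid6 personality */
	int fd = open("/sys/block/md0/md/level", O_WRONLY);

	if (fd < 0)
		return 1;
	if (write(fd, "raid6", 5) != 5)
		perror("write");	/* e.g. busy while recovery runs */
	close(fd);
	return 0;
}
#endif
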
4060 static ssize_t
4061 layout_show(struct mddev *mddev, char *page)
4062 {
4063         /* just a number, not meaningful for all levels */
4064         if (mddev->reshape_position != MaxSector &&
4065             mddev->layout != mddev->new_layout)
4066                 return sprintf(page, "%d (%d)\n",
4067                                mddev->new_layout, mddev->layout);
4068         return sprintf(page, "%d\n", mddev->layout);
4069 }
4070
4071 static ssize_t
4072 layout_store(struct mddev *mddev, const char *buf, size_t len)
4073 {
4074         unsigned int n;
4075         int err;
4076
4077         err = kstrtouint(buf, 10, &n);
4078         if (err < 0)
4079                 return err;
4080         err = mddev_lock(mddev);
4081         if (err)
4082                 return err;
4083
4084         if (mddev->pers) {
4085                 if (mddev->pers->check_reshape == NULL)
4086                         err = -EBUSY;
4087                 else if (!md_is_rdwr(mddev))
4088                         err = -EROFS;
4089                 else {
4090                         mddev->new_layout = n;
4091                         err = mddev->pers->check_reshape(mddev);
4092                         if (err)
4093                                 mddev->new_layout = mddev->layout;
4094                 }
4095         } else {
4096                 mddev->new_layout = n;
4097                 if (mddev->reshape_position == MaxSector)
4098                         mddev->layout = n;
4099         }
4100         mddev_unlock(mddev);
4101         return err ?: len;
4102 }
4103 static struct md_sysfs_entry md_layout =
4104 __ATTR(layout, S_IRUGO|S_IWUSR, layout_show, layout_store);
4105
4106 static ssize_t
4107 raid_disks_show(struct mddev *mddev, char *page)
4108 {
4109         if (mddev->raid_disks == 0)
4110                 return 0;
4111         if (mddev->reshape_position != MaxSector &&
4112             mddev->delta_disks != 0)
4113                 return sprintf(page, "%d (%d)\n", mddev->raid_disks,
4114                                mddev->raid_disks - mddev->delta_disks);
4115         return sprintf(page, "%d\n", mddev->raid_disks);
4116 }
4117
4118 static int update_raid_disks(struct mddev *mddev, int raid_disks);
4119
4120 static ssize_t
4121 raid_disks_store(struct mddev *mddev, const char *buf, size_t len)
4122 {
4123         unsigned int n;
4124         int err;
4125
4126         err = kstrtouint(buf, 10, &n);
4127         if (err < 0)
4128                 return err;
4129
4130         err = mddev_lock(mddev);
4131         if (err)
4132                 return err;
4133         if (mddev->pers)
4134                 err = update_raid_disks(mddev, n);
4135         else if (mddev->reshape_position != MaxSector) {
4136                 struct md_rdev *rdev;
4137                 int olddisks = mddev->raid_disks - mddev->delta_disks;
4138
4139                 err = -EINVAL;
4140                 rdev_for_each(rdev, mddev) {
4141                         if (olddisks < n &&
4142                             rdev->data_offset < rdev->new_data_offset)
4143                                 goto out_unlock;
4144                         if (olddisks > n &&
4145                             rdev->data_offset > rdev->new_data_offset)
4146                                 goto out_unlock;
4147                 }
4148                 err = 0;
4149                 mddev->delta_disks = n - olddisks;
4150                 mddev->raid_disks = n;
4151                 mddev->reshape_backwards = (mddev->delta_disks < 0);
4152         } else
4153                 mddev->raid_disks = n;
4154 out_unlock:
4155         mddev_unlock(mddev);
4156         return err ? err : len;
4157 }
4158 static struct md_sysfs_entry md_raid_disks =
4159 __ATTR(raid_disks, S_IRUGO|S_IWUSR, raid_disks_show, raid_disks_store);
4160
4161 static ssize_t
4162 uuid_show(struct mddev *mddev, char *page)
4163 {
4164         return sprintf(page, "%pU\n", mddev->uuid);
4165 }
4166 static struct md_sysfs_entry md_uuid =
4167 __ATTR(uuid, S_IRUGO, uuid_show, NULL);
4168
4169 static ssize_t
4170 chunk_size_show(struct mddev *mddev, char *page)
4171 {
4172         if (mddev->reshape_position != MaxSector &&
4173             mddev->chunk_sectors != mddev->new_chunk_sectors)
4174                 return sprintf(page, "%d (%d)\n",
4175                                mddev->new_chunk_sectors << 9,
4176                                mddev->chunk_sectors << 9);
4177         return sprintf(page, "%d\n", mddev->chunk_sectors << 9);
4178 }
4179
4180 static ssize_t
4181 chunk_size_store(struct mddev *mddev, const char *buf, size_t len)
4182 {
4183         unsigned long n;
4184         int err;
4185
4186         err = kstrtoul(buf, 10, &n);
4187         if (err < 0)
4188                 return err;
4189
4190         err = mddev_lock(mddev);
4191         if (err)
4192                 return err;
4193         if (mddev->pers) {
4194                 if (mddev->pers->check_reshape == NULL)
4195                         err = -EBUSY;
4196                 else if (!md_is_rdwr(mddev))
4197                         err = -EROFS;
4198                 else {
4199                         mddev->new_chunk_sectors = n >> 9;
4200                         err = mddev->pers->check_reshape(mddev);
4201                         if (err)
4202                                 mddev->new_chunk_sectors = mddev->chunk_sectors;
4203                 }
4204         } else {
4205                 mddev->new_chunk_sectors = n >> 9;
4206                 if (mddev->reshape_position == MaxSector)
4207                         mddev->chunk_sectors = n >> 9;
4208         }
4209         mddev_unlock(mddev);
4210         return err ?: len;
4211 }
4212 static struct md_sysfs_entry md_chunk_size =
4213 __ATTR(chunk_size, S_IRUGO|S_IWUSR, chunk_size_show, chunk_size_store);
4214
4215 static ssize_t
4216 resync_start_show(struct mddev *mddev, char *page)
4217 {
4218         if (mddev->recovery_cp == MaxSector)
4219                 return sprintf(page, "none\n");
4220         return sprintf(page, "%llu\n", (unsigned long long)mddev->recovery_cp);
4221 }
4222
4223 static ssize_t
4224 resync_start_store(struct mddev *mddev, const char *buf, size_t len)
4225 {
4226         unsigned long long n;
4227         int err;
4228
4229         if (cmd_match(buf, "none"))
4230                 n = MaxSector;
4231         else {
4232                 err = kstrtoull(buf, 10, &n);
4233                 if (err < 0)
4234                         return err;
4235                 if (n != (sector_t)n)
4236                         return -EINVAL;
4237         }
4238
4239         err = mddev_lock(mddev);
4240         if (err)
4241                 return err;
4242         if (mddev->pers && !test_bit(MD_RECOVERY_FROZEN, &mddev->recovery))
4243                 err = -EBUSY;
4244
4245         if (!err) {
4246                 mddev->recovery_cp = n;
4247                 if (mddev->pers)
4248                         set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
4249         }
4250         mddev_unlock(mddev);
4251         return err ?: len;
4252 }
4253 static struct md_sysfs_entry md_resync_start =
4254 __ATTR_PREALLOC(resync_start, S_IRUGO|S_IWUSR,
4255                 resync_start_show, resync_start_store);
4256
4257 /*
4258  * The array state can be:
4259  *
4260  * clear
4261  *     No devices, no size, no level
4262  *     Equivalent to STOP_ARRAY ioctl
4263  * inactive
4264  *     May have some settings, but array is not active
4265  *        all IO results in error
4266  *     When written, doesn't tear down array, but just stops it
4267  * suspended (not supported yet)
4268  *     All IO requests will block. The array can be reconfigured.
4269  *     Writing this, if accepted, will block until array is quiescent
4270  * readonly
4271  *     no resync can happen.  no superblocks get written.
4272  *     write requests fail
4273  * read-auto
4274  *     like readonly, but behaves like 'clean' on a write request.
4275  *
4276  * clean - no pending writes, but otherwise active.
4277  *     When written to inactive array, starts without resync
4278  *     If a write request arrives then
4279  *       if metadata is known, mark 'dirty' and switch to 'active'.
4280  *       if not known, block and switch to write-pending
4281  *     If written to an active array that has pending writes, then fails.
4282  * active
4283  *     fully active: IO and resync can be happening.
4284  *     When written to inactive array, starts with resync
4285  *
4286  * write-pending
4287  *     clean, but writes are blocked waiting for 'active' to be written.
4288  *
4289  * active-idle
4290  *     like active, but no writes have been seen for a while (100msec).
4291  *
4292  * broken
4293  *     Array has failed. It's useful because mounted arrays aren't stopped
4294  *     when the array fails, so this state will at least alert the user that
4295  *     something is wrong.
4296  */
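/*
 * Editor's note, illustrative only ("md0" is an example device name):
 *
 *   cat /sys/block/md0/md/array_state          # e.g. "clean"
 *   echo readonly > /sys/block/md0/md/array_state
 *
 * Only the states handled in array_state_store() below may be written;
 * suspended is not supported yet, and write-pending, active-idle and
 * broken are read-only.
 */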
4297 enum array_state { clear, inactive, suspended, readonly, read_auto, clean, active,
4298                    write_pending, active_idle, broken, bad_word};
4299 static char *array_states[] = {
4300         "clear", "inactive", "suspended", "readonly", "read-auto", "clean", "active",
4301         "write-pending", "active-idle", "broken", NULL };
4302
4303 static int match_word(const char *word, char **list)
4304 {
4305         int n;
4306         for (n=0; list[n]; n++)
4307                 if (cmd_match(word, list[n]))
4308                         break;
4309         return n;
4310 }
4311
4312 static ssize_t
4313 array_state_show(struct mddev *mddev, char *page)
4314 {
4315         enum array_state st = inactive;
4316
4317         if (mddev->pers && !test_bit(MD_NOT_READY, &mddev->flags)) {
4318                 switch(mddev->ro) {
4319                 case MD_RDONLY:
4320                         st = readonly;
4321                         break;
4322                 case MD_AUTO_READ:
4323                         st = read_auto;
4324                         break;
4325                 case MD_RDWR:
4326                         spin_lock(&mddev->lock);
4327                         if (test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags))
4328                                 st = write_pending;
4329                         else if (mddev->in_sync)
4330                                 st = clean;
4331                         else if (mddev->safemode)
4332                                 st = active_idle;
4333                         else
4334                                 st = active;
4335                         spin_unlock(&mddev->lock);
4336                 }
4337
4338                 if (test_bit(MD_BROKEN, &mddev->flags) && st == clean)
4339                         st = broken;
4340         } else {
4341                 if (list_empty(&mddev->disks) &&
4342                     mddev->raid_disks == 0 &&
4343                     mddev->dev_sectors == 0)
4344                         st = clear;
4345                 else
4346                         st = inactive;
4347         }
4348         return sprintf(page, "%s\n", array_states[st]);
4349 }
4350
4351 static int do_md_stop(struct mddev *mddev, int ro, struct block_device *bdev);
4352 static int md_set_readonly(struct mddev *mddev, struct block_device *bdev);
4353 static int restart_array(struct mddev *mddev);
4354
4355 static ssize_t
4356 array_state_store(struct mddev *mddev, const char *buf, size_t len)
4357 {
4358         int err = 0;
4359         enum array_state st = match_word(buf, array_states);
4360
4361         if (mddev->pers && (st == active || st == clean) &&
4362             mddev->ro != MD_RDONLY) {
4363                 /* don't take reconfig_mutex when toggling between
4364                  * clean and active
4365                  */
4366                 spin_lock(&mddev->lock);
4367                 if (st == active) {
4368                         restart_array(mddev);
4369                         clear_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
4370                         md_wakeup_thread(mddev->thread);
4371                         wake_up(&mddev->sb_wait);
4372                 } else /* st == clean */ {
4373                         restart_array(mddev);
4374                         if (!set_in_sync(mddev))
4375                                 err = -EBUSY;
4376                 }
4377                 if (!err)
4378                         sysfs_notify_dirent_safe(mddev->sysfs_state);
4379                 spin_unlock(&mddev->lock);
4380                 return err ?: len;
4381         }
4382         err = mddev_lock(mddev);
4383         if (err)
4384                 return err;
4385         err = -EINVAL;
4386         switch(st) {
4387         case bad_word:
4388                 break;
4389         case clear:
4390                 /* stopping an active array */
4391                 err = do_md_stop(mddev, 0, NULL);
4392                 break;
4393         case inactive:
4394                 /* stopping an active array */
4395                 if (mddev->pers)
4396                         err = do_md_stop(mddev, 2, NULL);
4397                 else
4398                         err = 0; /* already inactive */
4399                 break;
4400         case suspended:
4401                 break; /* not supported yet */
4402         case readonly:
4403                 if (mddev->pers)
4404                         err = md_set_readonly(mddev, NULL);
4405                 else {
4406                         mddev->ro = MD_RDONLY;
4407                         set_disk_ro(mddev->gendisk, 1);
4408                         err = do_md_run(mddev);
4409                 }
4410                 break;
4411         case read_auto:
4412                 if (mddev->pers) {
4413                         if (md_is_rdwr(mddev))
4414                                 err = md_set_readonly(mddev, NULL);
4415                         else if (mddev->ro == MD_RDONLY)
4416                                 err = restart_array(mddev);
4417                         if (err == 0) {
4418                                 mddev->ro = MD_AUTO_READ;
4419                                 set_disk_ro(mddev->gendisk, 0);
4420                         }
4421                 } else {
4422                         mddev->ro = MD_AUTO_READ;
4423                         err = do_md_run(mddev);
4424                 }
4425                 break;
4426         case clean:
4427                 if (mddev->pers) {
4428                         err = restart_array(mddev);
4429                         if (err)
4430                                 break;
4431                         spin_lock(&mddev->lock);
4432                         if (!set_in_sync(mddev))
4433                                 err = -EBUSY;
4434                         spin_unlock(&mddev->lock);
4435                 } else
4436                         err = -EINVAL;
4437                 break;
4438         case active:
4439                 if (mddev->pers) {
4440                         err = restart_array(mddev);
4441                         if (err)
4442                                 break;
4443                         clear_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
4444                         wake_up(&mddev->sb_wait);
4445                         err = 0;
4446                 } else {
4447                         mddev->ro = MD_RDWR;
4448                         set_disk_ro(mddev->gendisk, 0);
4449                         err = do_md_run(mddev);
4450                 }
4451                 break;
4452         case write_pending:
4453         case active_idle:
4454         case broken:
4455                 /* these cannot be set */
4456                 break;
4457         }
4458
4459         if (!err) {
4460                 if (mddev->hold_active == UNTIL_IOCTL)
4461                         mddev->hold_active = 0;
4462                 sysfs_notify_dirent_safe(mddev->sysfs_state);
4463         }
4464         mddev_unlock(mddev);
4465         return err ?: len;
4466 }
4467 static struct md_sysfs_entry md_array_state =
4468 __ATTR_PREALLOC(array_state, S_IRUGO|S_IWUSR, array_state_show, array_state_store);
4469
4470 static ssize_t
4471 max_corrected_read_errors_show(struct mddev *mddev, char *page) {
4472         return sprintf(page, "%d\n",
4473                        atomic_read(&mddev->max_corr_read_errors));
4474 }
4475
4476 static ssize_t
4477 max_corrected_read_errors_store(struct mddev *mddev, const char *buf, size_t len)
4478 {
4479         unsigned int n;
4480         int rv;
4481
4482         rv = kstrtouint(buf, 10, &n);
4483         if (rv < 0)
4484                 return rv;
4485         if (n > INT_MAX)
4486                 return -EINVAL;
4487         atomic_set(&mddev->max_corr_read_errors, n);
4488         return len;
4489 }
4490
4491 static struct md_sysfs_entry max_corr_read_errors =
4492 __ATTR(max_read_errors, S_IRUGO|S_IWUSR, max_corrected_read_errors_show,
4493         max_corrected_read_errors_store);
4494
4495 static ssize_t
4496 null_show(struct mddev *mddev, char *page)
4497 {
4498         return -EINVAL;
4499 }
4500
4501 static ssize_t
4502 new_dev_store(struct mddev *mddev, const char *buf, size_t len)
4503 {
4504         /* buf must be "%d:%d" with an optional trailing newline, giving major and minor numbers */
4505         /* The new device is added to the array.
4506          * If the array has a persistent superblock, we read the
4507          * superblock to initialise info and check validity.
4508          * Otherwise, the only checking done is that in bind_rdev_to_array,
4509          * which mainly checks size.
4510          */
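        /*
         * Editor's note, illustrative only (8:16 is an example major:minor):
         *   echo 8:16 > /sys/block/md0/md/new_dev
         * adds the block device with major 8, minor 16 to this array.
         */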
4511         char *e;
4512         int major = simple_strtoul(buf, &e, 10);
4513         int minor;
4514         dev_t dev;
4515         struct md_rdev *rdev;
4516         int err;
4517
4518         if (!*buf || *e != ':' || !e[1] || e[1] == '\n')
4519                 return -EINVAL;
4520         minor = simple_strtoul(e+1, &e, 10);
4521         if (*e && *e != '\n')
4522                 return -EINVAL;
4523         dev = MKDEV(major, minor);
4524         if (major != MAJOR(dev) ||
4525             minor != MINOR(dev))
4526                 return -EOVERFLOW;
4527
4528         err = mddev_lock(mddev);
4529         if (err)
4530                 return err;
4531         if (mddev->persistent) {
4532                 rdev = md_import_device(dev, mddev->major_version,
4533                                         mddev->minor_version);
4534                 if (!IS_ERR(rdev) && !list_empty(&mddev->disks)) {
4535                         struct md_rdev *rdev0
4536                                 = list_entry(mddev->disks.next,
4537                                              struct md_rdev, same_set);
4538                         err = super_types[mddev->major_version]
4539                                 .load_super(rdev, rdev0, mddev->minor_version);
4540                         if (err < 0)
4541                                 goto out;
4542                 }
4543         } else if (mddev->external)
4544                 rdev = md_import_device(dev, -2, -1);
4545         else
4546                 rdev = md_import_device(dev, -1, -1);
4547
4548         if (IS_ERR(rdev)) {
4549                 mddev_unlock(mddev);
4550                 return PTR_ERR(rdev);
4551         }
4552         err = bind_rdev_to_array(rdev, mddev);
4553  out:
4554         if (err)
4555                 export_rdev(rdev, mddev);
4556         mddev_unlock(mddev);
4557         if (!err)
4558                 md_new_event();
4559         return err ? err : len;
4560 }
4561
4562 static struct md_sysfs_entry md_new_device =
4563 __ATTR(new_dev, S_IWUSR, null_show, new_dev_store);
4564
4565 static ssize_t
4566 bitmap_store(struct mddev *mddev, const char *buf, size_t len)
4567 {
4568         char *end;
4569         unsigned long chunk, end_chunk;
4570         int err;
4571
4572         err = mddev_lock(mddev);
4573         if (err)
4574                 return err;
4575         if (!mddev->bitmap)
4576                 goto out;
4577         /* buf should be <chunk> <chunk> ... or <chunk>-<chunk> ... (range) */
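        /* e.g. "64 128-256" would dirty chunk 64 and the range 128-256 (editor's illustrative example) */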
4578         while (*buf) {
4579                 chunk = end_chunk = simple_strtoul(buf, &end, 0);
4580                 if (buf == end) break;
4581                 if (*end == '-') { /* range */
4582                         buf = end + 1;
4583                         end_chunk = simple_strtoul(buf, &end, 0);
4584                         if (buf == end) break;
4585                 }
4586                 if (*end && !isspace(*end)) break;
4587                 md_bitmap_dirty_bits(mddev->bitmap, chunk, end_chunk);
4588                 buf = skip_spaces(end);
4589         }
4590         md_bitmap_unplug(mddev->bitmap); /* flush the bits to disk */
4591 out:
4592         mddev_unlock(mddev);
4593         return len;
4594 }
4595
4596 static struct md_sysfs_entry md_bitmap =
4597 __ATTR(bitmap_set_bits, S_IWUSR, null_show, bitmap_store);
4598
4599 static ssize_t
4600 size_show(struct mddev *mddev, char *page)
4601 {
4602         return sprintf(page, "%llu\n",
4603                 (unsigned long long)mddev->dev_sectors / 2);
4604 }
4605
4606 static int update_size(struct mddev *mddev, sector_t num_sectors);
4607
4608 static ssize_t
4609 size_store(struct mddev *mddev, const char *buf, size_t len)
4610 {
4611         /* If array is inactive, we can reduce the component size, but
4612          * not increase it (except from 0).
4613          * If array is active, we can try an on-line resize
4614          */
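        /*
         * Editor's note, illustrative only: the value written is in KiB,
         * e.g. "echo 1048576 > component_size" requests 1 GiB per device.
         */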
4615         sector_t sectors;
4616         int err = strict_blocks_to_sectors(buf, &sectors);
4617
4618         if (err < 0)
4619                 return err;
4620         err = mddev_lock(mddev);
4621         if (err)
4622                 return err;
4623         if (mddev->pers) {
4624                 err = update_size(mddev, sectors);
4625                 if (err == 0)
4626                         md_update_sb(mddev, 1);
4627         } else {
4628                 if (mddev->dev_sectors == 0 ||
4629                     mddev->dev_sectors > sectors)
4630                         mddev->dev_sectors = sectors;
4631                 else
4632                         err = -ENOSPC;
4633         }
4634         mddev_unlock(mddev);
4635         return err ? err : len;
4636 }
4637
4638 static struct md_sysfs_entry md_size =
4639 __ATTR(component_size, S_IRUGO|S_IWUSR, size_show, size_store);
4640
4641 /* Metadata version.
4642  * This is one of
4643  *   'none' for arrays with no metadata (good luck...)
4644  *   'external' for arrays with externally managed metadata,
4645  * or N.M for internally known formats
4646  */
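/*
 * Editor's note, illustrative values accepted by metadata_store():
 *   "none", "external:imsm" (the name here is just an example), or a
 *   version such as "0.90" or "1.2" for internally known formats.
 */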
4647 static ssize_t
4648 metadata_show(struct mddev *mddev, char *page)
4649 {
4650         if (mddev->persistent)
4651                 return sprintf(page, "%d.%d\n",
4652                                mddev->major_version, mddev->minor_version);
4653         else if (mddev->external)
4654                 return sprintf(page, "external:%s\n", mddev->metadata_type);
4655         else
4656                 return sprintf(page, "none\n");
4657 }
4658
4659 static ssize_t
4660 metadata_store(struct mddev *mddev, const char *buf, size_t len)
4661 {
4662         int major, minor;
4663         char *e;
4664         int err;
4665         /* Changing the details of 'external' metadata is
4666          * always permitted.  Otherwise there must be
4667          * no devices attached to the array.
4668          */
4669
4670         err = mddev_lock(mddev);
4671         if (err)
4672                 return err;
4673         err = -EBUSY;
4674         if (mddev->external && strncmp(buf, "external:", 9) == 0)
4675                 ;
4676         else if (!list_empty(&mddev->disks))
4677                 goto out_unlock;
4678
4679         err = 0;
4680         if (cmd_match(buf, "none")) {
4681                 mddev->persistent = 0;
4682                 mddev->external = 0;
4683                 mddev->major_version = 0;
4684                 mddev->minor_version = 90;
4685                 goto out_unlock;
4686         }
4687         if (strncmp(buf, "external:", 9) == 0) {
4688                 size_t namelen = len-9;
4689                 if (namelen >= sizeof(mddev->metadata_type))
4690                         namelen = sizeof(mddev->metadata_type)-1;
4691                 strncpy(mddev->metadata_type, buf+9, namelen);
4692                 mddev->metadata_type[namelen] = 0;
4693                 if (namelen && mddev->metadata_type[namelen-1] == '\n')
4694                         mddev->metadata_type[--namelen] = 0;
4695                 mddev->persistent = 0;
4696                 mddev->external = 1;
4697                 mddev->major_version = 0;
4698                 mddev->minor_version = 90;
4699                 goto out_unlock;
4700         }
4701         major = simple_strtoul(buf, &e, 10);
4702         err = -EINVAL;
4703         if (e==buf || *e != '.')
4704                 goto out_unlock;
4705         buf = e+1;
4706         minor = simple_strtoul(buf, &e, 10);
4707         if (e == buf || (*e && *e != '\n'))
4708                 goto out_unlock;
4709         err = -ENOENT;
4710         if (major >= ARRAY_SIZE(super_types) || super_types[major].name == NULL)
4711                 goto out_unlock;
4712         mddev->major_version = major;
4713         mddev->minor_version = minor;
4714         mddev->persistent = 1;
4715         mddev->external = 0;
4716         err = 0;
4717 out_unlock:
4718         mddev_unlock(mddev);
4719         return err ?: len;
4720 }
4721
4722 static struct md_sysfs_entry md_metadata =
4723 __ATTR_PREALLOC(metadata_version, S_IRUGO|S_IWUSR, metadata_show, metadata_store);
4724
4725 static ssize_t
4726 action_show(struct mddev *mddev, char *page)
4727 {
4728         char *type = "idle";
4729         unsigned long recovery = mddev->recovery;
4730         if (test_bit(MD_RECOVERY_FROZEN, &recovery))
4731                 type = "frozen";
4732         else if (test_bit(MD_RECOVERY_RUNNING, &recovery) ||
4733             (md_is_rdwr(mddev) && test_bit(MD_RECOVERY_NEEDED, &recovery))) {
4734                 if (test_bit(MD_RECOVERY_RESHAPE, &recovery))
4735                         type = "reshape";
4736                 else if (test_bit(MD_RECOVERY_SYNC, &recovery)) {
4737                         if (!test_bit(MD_RECOVERY_REQUESTED, &recovery))
4738                                 type = "resync";
4739                         else if (test_bit(MD_RECOVERY_CHECK, &recovery))
4740                                 type = "check";
4741                         else
4742                                 type = "repair";
4743                 } else if (test_bit(MD_RECOVERY_RECOVER, &recovery))
4744                         type = "recover";
4745                 else if (mddev->reshape_position != MaxSector)
4746                         type = "reshape";
4747         }
4748         return sprintf(page, "%s\n", type);
4749 }
4750
4751 static void stop_sync_thread(struct mddev *mddev)
4752 {
4753         if (!test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
4754                 return;
4755
4756         if (mddev_lock(mddev))
4757                 return;
4758
4759         /*
4760          * Check again in case MD_RECOVERY_RUNNING is cleared before lock is
4761          * held.
4762          */
4763         if (!test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) {
4764                 mddev_unlock(mddev);
4765                 return;
4766         }
4767
4768         if (work_pending(&mddev->del_work))
4769                 flush_workqueue(md_misc_wq);
4770
4771         if (mddev->sync_thread) {
4772                 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
4773                 md_reap_sync_thread(mddev);
4774         }
4775
4776         mddev_unlock(mddev);
4777 }
4778
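/*
 * idle_sync_thread() and frozen_sync_thread() take mddev->sync_mutex so that
 * concurrent "idle" and "frozen" writers of sync_action cannot interleave
 * setting/clearing MD_RECOVERY_FROZEN with stopping the sync thread.
 */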
4779 static void idle_sync_thread(struct mddev *mddev)
4780 {
4781         mutex_lock(&mddev->sync_mutex);
4782         clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
4783         stop_sync_thread(mddev);
4784         mutex_unlock(&mddev->sync_mutex);
4785 }
4786
4787 static void frozen_sync_thread(struct mddev *mddev)
4788 {
4789         mutex_lock(&mddev->sync_mutex);
4790         set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
4791         stop_sync_thread(mddev);
4792         mutex_unlock(&mddev->sync_mutex);
4793 }
4794
4795 static ssize_t
4796 action_store(struct mddev *mddev, const char *page, size_t len)
4797 {
4798         if (!mddev->pers || !mddev->pers->sync_request)
4799                 return -EINVAL;
4800
4801
4802         if (cmd_match(page, "idle"))
4803                 idle_sync_thread(mddev);
4804         else if (cmd_match(page, "frozen"))
4805                 frozen_sync_thread(mddev);
4806         else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
4807                 return -EBUSY;
4808         else if (cmd_match(page, "resync"))
4809                 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
4810         else if (cmd_match(page, "recover")) {
4811                 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
4812                 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
4813         } else if (cmd_match(page, "reshape")) {
4814                 int err;
4815                 if (mddev->pers->start_reshape == NULL)
4816                         return -EINVAL;
4817                 err = mddev_lock(mddev);
4818                 if (!err) {
4819                         if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) {
4820                                 err = -EBUSY;
4821                         } else if (mddev->reshape_position == MaxSector ||
4822                                    mddev->pers->check_reshape == NULL ||
4823                                    mddev->pers->check_reshape(mddev)) {
4824                                 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
4825                                 err = mddev->pers->start_reshape(mddev);
4826                         } else {
4827                                 /*
4828                                  * If reshape is still in progress, and
4829                                  * md_check_recovery() can continue to reshape,
4830                                  * don't restart reshape because data can be
4831                                  * corrupted for raid456.
4832                                  */
4833                                 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
4834                         }
4835                         mddev_unlock(mddev);
4836                 }
4837                 if (err)
4838                         return err;
4839                 sysfs_notify_dirent_safe(mddev->sysfs_degraded);
4840         } else {
4841                 if (cmd_match(page, "check"))
4842                         set_bit(MD_RECOVERY_CHECK, &mddev->recovery);
4843                 else if (!cmd_match(page, "repair"))
4844                         return -EINVAL;
4845                 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
4846                 set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
4847                 set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
4848         }
4849         if (mddev->ro == MD_AUTO_READ) {
4850                 /* A write to sync_action is enough to justify
4851                  * canceling read-auto mode
4852                  */
4853                 mddev->ro = MD_RDWR;
4854                 md_wakeup_thread(mddev->sync_thread);
4855         }
4856         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
4857         md_wakeup_thread(mddev->thread);
4858         sysfs_notify_dirent_safe(mddev->sysfs_action);
4859         return len;
4860 }
4861
4862 static struct md_sysfs_entry md_scan_mode =
4863 __ATTR_PREALLOC(sync_action, S_IRUGO|S_IWUSR, action_show, action_store);
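/*
 * Editor's note, illustrative sync_action usage ("md0" is an example):
 *   echo check  > /sys/block/md0/md/sync_action   # read-only consistency scan
 *   echo idle   > /sys/block/md0/md/sync_action   # stop any running sync
 *   echo frozen > /sys/block/md0/md/sync_action   # stop and block new syncs
 */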
4864
4865 static ssize_t
4866 last_sync_action_show(struct mddev *mddev, char *page)
4867 {
4868         return sprintf(page, "%s\n", mddev->last_sync_action);
4869 }
4870
4871 static struct md_sysfs_entry md_last_scan_mode = __ATTR_RO(last_sync_action);
4872
4873 static ssize_t
4874 mismatch_cnt_show(struct mddev *mddev, char *page)
4875 {
4876         return sprintf(page, "%llu\n",
4877                        (unsigned long long)
4878                        atomic64_read(&mddev->resync_mismatches));
4879 }
4880
4881 static struct md_sysfs_entry md_mismatches = __ATTR_RO(mismatch_cnt);
4882
4883 static ssize_t
4884 sync_min_show(struct mddev *mddev, char *page)
4885 {
4886         return sprintf(page, "%d (%s)\n", speed_min(mddev),
4887                        mddev->sync_speed_min ? "local": "system");
4888 }
4889
4890 static ssize_t
4891 sync_min_store(struct mddev *mddev, const char *buf, size_t len)
4892 {
4893         unsigned int min;
4894         int rv;
4895
4896         if (strncmp(buf, "system", 6)==0) {
4897                 min = 0;
4898         } else {
4899                 rv = kstrtouint(buf, 10, &min);
4900                 if (rv < 0)
4901                         return rv;
4902                 if (min == 0)
4903                         return -EINVAL;
4904         }
4905         mddev->sync_speed_min = min;
4906         return len;
4907 }
4908
4909 static struct md_sysfs_entry md_sync_min =
4910 __ATTR(sync_speed_min, S_IRUGO|S_IWUSR, sync_min_show, sync_min_store);
4911
4912 static ssize_t
4913 sync_max_show(struct mddev *mddev, char *page)
4914 {
4915         return sprintf(page, "%d (%s)\n", speed_max(mddev),
4916                        mddev->sync_speed_max ? "local": "system");
4917 }
4918
4919 static ssize_t
4920 sync_max_store(struct mddev *mddev, const char *buf, size_t len)
4921 {
4922         unsigned int max;
4923         int rv;
4924
4925         if (strncmp(buf, "system", 6)==0) {
4926                 max = 0;
4927         } else {
4928                 rv = kstrtouint(buf, 10, &max);
4929                 if (rv < 0)
4930                         return rv;
4931                 if (max == 0)
4932                         return -EINVAL;
4933         }
4934         mddev->sync_speed_max = max;
4935         return len;
4936 }
4937
4938 static struct md_sysfs_entry md_sync_max =
4939 __ATTR(sync_speed_max, S_IRUGO|S_IWUSR, sync_max_show, sync_max_store);
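/*
 * Editor's note, illustrative only: the limits are interpreted in KB/sec and
 * "system" reverts to the system-wide default, e.g.
 *   echo 50000  > /sys/block/md0/md/sync_speed_max
 *   echo system > /sys/block/md0/md/sync_speed_min
 */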
4940
4941 static ssize_t
4942 degraded_show(struct mddev *mddev, char *page)
4943 {
4944         return sprintf(page, "%d\n", mddev->degraded);
4945 }
4946 static struct md_sysfs_entry md_degraded = __ATTR_RO(degraded);
4947
4948 static ssize_t
4949 sync_force_parallel_show(struct mddev *mddev, char *page)
4950 {
4951         return sprintf(page, "%d\n", mddev->parallel_resync);
4952 }
4953
4954 static ssize_t
4955 sync_force_parallel_store(struct mddev *mddev, const char *buf, size_t len)
4956 {
4957         long n;
4958
4959         if (kstrtol(buf, 10, &n))
4960                 return -EINVAL;
4961
4962         if (n != 0 && n != 1)
4963                 return -EINVAL;
4964
4965         mddev->parallel_resync = n;
4966
4967         if (mddev->sync_thread)
4968                 wake_up(&resync_wait);
4969
4970         return len;
4971 }
4972
4973 /* force parallel resync, even with shared block devices */
4974 static struct md_sysfs_entry md_sync_force_parallel =
4975 __ATTR(sync_force_parallel, S_IRUGO|S_IWUSR,
4976        sync_force_parallel_show, sync_force_parallel_store);
4977
4978 static ssize_t
4979 sync_speed_show(struct mddev *mddev, char *page)
4980 {
4981         unsigned long resync, dt, db;
4982         if (mddev->curr_resync == MD_RESYNC_NONE)
4983                 return sprintf(page, "none\n");
4984         resync = mddev->curr_mark_cnt - atomic_read(&mddev->recovery_active);
4985         dt = (jiffies - mddev->resync_mark) / HZ;
4986         if (!dt) dt++;
4987         db = resync - mddev->resync_mark_cnt;
4988         return sprintf(page, "%lu\n", db/dt/2); /* K/sec */
4989 }
4990
4991 static struct md_sysfs_entry md_sync_speed = __ATTR_RO(sync_speed);
4992
4993 static ssize_t
4994 sync_completed_show(struct mddev *mddev, char *page)
4995 {
4996         unsigned long long max_sectors, resync;
4997
4998         if (!test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
4999                 return sprintf(page, "none\n");
5000
5001         if (mddev->curr_resync == MD_RESYNC_YIELDED ||
5002             mddev->curr_resync == MD_RESYNC_DELAYED)
5003                 return sprintf(page, "delayed\n");
5004
5005         if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ||
5006             test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
5007                 max_sectors = mddev->resync_max_sectors;
5008         else
5009                 max_sectors = mddev->dev_sectors;
5010
5011         resync = mddev->curr_resync_completed;
5012         return sprintf(page, "%llu / %llu\n", resync, max_sectors);
5013 }
5014
5015 static struct md_sysfs_entry md_sync_completed =
5016         __ATTR_PREALLOC(sync_completed, S_IRUGO, sync_completed_show, NULL);
5017
5018 static ssize_t
5019 min_sync_show(struct mddev *mddev, char *page)
5020 {
5021         return sprintf(page, "%llu\n",
5022                        (unsigned long long)mddev->resync_min);
5023 }
5024 static ssize_t
5025 min_sync_store(struct mddev *mddev, const char *buf, size_t len)
5026 {
5027         unsigned long long min;
5028         int err;
5029
5030         if (kstrtoull(buf, 10, &min))
5031                 return -EINVAL;
5032
5033         spin_lock(&mddev->lock);
5034         err = -EINVAL;
5035         if (min > mddev->resync_max)
5036                 goto out_unlock;
5037
5038         err = -EBUSY;
5039         if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
5040                 goto out_unlock;
5041
5042         /* Round down to a multiple of 4K (8 sectors) for safety */
5043         mddev->resync_min = round_down(min, 8);
5044         err = 0;
5045
5046 out_unlock:
5047         spin_unlock(&mddev->lock);
5048         return err ?: len;
5049 }
5050
5051 static struct md_sysfs_entry md_min_sync =
5052 __ATTR(sync_min, S_IRUGO|S_IWUSR, min_sync_show, min_sync_store);
5053
5054 static ssize_t
5055 max_sync_show(struct mddev *mddev, char *page)
5056 {
5057         if (mddev->resync_max == MaxSector)
5058                 return sprintf(page, "max\n");
5059         else
5060                 return sprintf(page, "%llu\n",
5061                                (unsigned long long)mddev->resync_max);
5062 }
5063 static ssize_t
5064 max_sync_store(struct mddev *mddev, const char *buf, size_t len)
5065 {
5066         int err;
5067         spin_lock(&mddev->lock);
5068         if (strncmp(buf, "max", 3) == 0)
5069                 mddev->resync_max = MaxSector;
5070         else {
5071                 unsigned long long max;
5072                 int chunk;
5073
5074                 err = -EINVAL;
5075                 if (kstrtoull(buf, 10, &max))
5076                         goto out_unlock;
5077                 if (max < mddev->resync_min)
5078                         goto out_unlock;
5079
5080                 err = -EBUSY;
5081                 if (max < mddev->resync_max && md_is_rdwr(mddev) &&
5082                     test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
5083                         goto out_unlock;
5084
5085                 /* Must be a multiple of chunk_size */
5086                 chunk = mddev->chunk_sectors;
5087                 if (chunk) {
5088                         sector_t temp = max;
5089
5090                         err = -EINVAL;
5091                         if (sector_div(temp, chunk))
5092                                 goto out_unlock;
5093                 }
5094                 mddev->resync_max = max;
5095         }
5096         wake_up(&mddev->recovery_wait);
5097         err = 0;
5098 out_unlock:
5099         spin_unlock(&mddev->lock);
5100         return err ?: len;
5101 }
5102
5103 static struct md_sysfs_entry md_max_sync =
5104 __ATTR(sync_max, S_IRUGO|S_IWUSR, max_sync_show, max_sync_store);
5105
5106 static ssize_t
5107 suspend_lo_show(struct mddev *mddev, char *page)
5108 {
5109         return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_lo);
5110 }
5111
5112 static ssize_t
5113 suspend_lo_store(struct mddev *mddev, const char *buf, size_t len)
5114 {
5115         unsigned long long new;
5116         int err;
5117
5118         err = kstrtoull(buf, 10, &new);
5119         if (err < 0)
5120                 return err;
5121         if (new != (sector_t)new)
5122                 return -EINVAL;
5123
5124         err = mddev_lock(mddev);
5125         if (err)
5126                 return err;
5127         err = -EINVAL;
5128         if (mddev->pers == NULL ||
5129             mddev->pers->quiesce == NULL)
5130                 goto unlock;
5131         mddev_suspend(mddev);
5132         mddev->suspend_lo = new;
5133         mddev_resume(mddev);
5134
5135         err = 0;
5136 unlock:
5137         mddev_unlock(mddev);
5138         return err ?: len;
5139 }
5140 static struct md_sysfs_entry md_suspend_lo =
5141 __ATTR(suspend_lo, S_IRUGO|S_IWUSR, suspend_lo_show, suspend_lo_store);
5142
5143 static ssize_t
5144 suspend_hi_show(struct mddev *mddev, char *page)
5145 {
5146         return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_hi);
5147 }
5148
5149 static ssize_t
5150 suspend_hi_store(struct mddev *mddev, const char *buf, size_t len)
5151 {
5152         unsigned long long new;
5153         int err;
5154
5155         err = kstrtoull(buf, 10, &new);
5156         if (err < 0)
5157                 return err;
5158         if (new != (sector_t)new)
5159                 return -EINVAL;
5160
5161         err = mddev_lock(mddev);
5162         if (err)
5163                 return err;
5164         err = -EINVAL;
5165         if (mddev->pers == NULL)
5166                 goto unlock;
5167
5168         mddev_suspend(mddev);
5169         mddev->suspend_hi = new;
5170         mddev_resume(mddev);
5171
5172         err = 0;
5173 unlock:
5174         mddev_unlock(mddev);
5175         return err ?: len;
5176 }
5177 static struct md_sysfs_entry md_suspend_hi =
5178 __ATTR(suspend_hi, S_IRUGO|S_IWUSR, suspend_hi_show, suspend_hi_store);
5179
5180 static ssize_t
5181 reshape_position_show(struct mddev *mddev, char *page)
5182 {
5183         if (mddev->reshape_position != MaxSector)
5184                 return sprintf(page, "%llu\n",
5185                                (unsigned long long)mddev->reshape_position);
5186         strcpy(page, "none\n");
5187         return 5;
5188 }
5189
5190 static ssize_t
5191 reshape_position_store(struct mddev *mddev, const char *buf, size_t len)
5192 {
5193         struct md_rdev *rdev;
5194         unsigned long long new;
5195         int err;
5196
5197         err = kstrtoull(buf, 10, &new);
5198         if (err < 0)
5199                 return err;
5200         if (new != (sector_t)new)
5201                 return -EINVAL;
5202         err = mddev_lock(mddev);
5203         if (err)
5204                 return err;
5205         err = -EBUSY;
5206         if (mddev->pers)
5207                 goto unlock;
5208         mddev->reshape_position = new;
5209         mddev->delta_disks = 0;
5210         mddev->reshape_backwards = 0;
5211         mddev->new_level = mddev->level;
5212         mddev->new_layout = mddev->layout;
5213         mddev->new_chunk_sectors = mddev->chunk_sectors;
5214         rdev_for_each(rdev, mddev)
5215                 rdev->new_data_offset = rdev->data_offset;
5216         err = 0;
5217 unlock:
5218         mddev_unlock(mddev);
5219         return err ?: len;
5220 }
5221
5222 static struct md_sysfs_entry md_reshape_position =
5223 __ATTR(reshape_position, S_IRUGO|S_IWUSR, reshape_position_show,
5224        reshape_position_store);
5225
5226 static ssize_t
5227 reshape_direction_show(struct mddev *mddev, char *page)
5228 {
5229         return sprintf(page, "%s\n",
5230                        mddev->reshape_backwards ? "backwards" : "forwards");
5231 }
5232
5233 static ssize_t
5234 reshape_direction_store(struct mddev *mddev, const char *buf, size_t len)
5235 {
5236         int backwards = 0;
5237         int err;
5238
5239         if (cmd_match(buf, "forwards"))
5240                 backwards = 0;
5241         else if (cmd_match(buf, "backwards"))
5242                 backwards = 1;
5243         else
5244                 return -EINVAL;
5245         if (mddev->reshape_backwards == backwards)
5246                 return len;
5247
5248         err = mddev_lock(mddev);
5249         if (err)
5250                 return err;
5251         /* check if we are allowed to change */
5252         if (mddev->delta_disks)
5253                 err = -EBUSY;
5254         else if (mddev->persistent &&
5255             mddev->major_version == 0)
5256                 err = -EINVAL;
5257         else
5258                 mddev->reshape_backwards = backwards;
5259         mddev_unlock(mddev);
5260         return err ?: len;
5261 }
5262
5263 static struct md_sysfs_entry md_reshape_direction =
5264 __ATTR(reshape_direction, S_IRUGO|S_IWUSR, reshape_direction_show,
5265        reshape_direction_store);
5266
5267 static ssize_t
5268 array_size_show(struct mddev *mddev, char *page)
5269 {
5270         if (mddev->external_size)
5271                 return sprintf(page, "%llu\n",
5272                                (unsigned long long)mddev->array_sectors/2);
5273         else
5274                 return sprintf(page, "default\n");
5275 }
5276
5277 static ssize_t
5278 array_size_store(struct mddev *mddev, const char *buf, size_t len)
5279 {
5280         sector_t sectors;
5281         int err;
5282
5283         err = mddev_lock(mddev);
5284         if (err)
5285                 return err;
5286
5287         /* cluster raid doesn't support changing array_sectors */
5288         if (mddev_is_clustered(mddev)) {
5289                 mddev_unlock(mddev);
5290                 return -EINVAL;
5291         }
5292
5293         if (strncmp(buf, "default", 7) == 0) {
5294                 if (mddev->pers)
5295                         sectors = mddev->pers->size(mddev, 0, 0);
5296                 else
5297                         sectors = mddev->array_sectors;
5298
5299                 mddev->external_size = 0;
5300         } else {
5301                 if (strict_blocks_to_sectors(buf, &sectors) < 0)
5302                         err = -EINVAL;
5303                 else if (mddev->pers && mddev->pers->size(mddev, 0, 0) < sectors)
5304                         err = -E2BIG;
5305                 else
5306                         mddev->external_size = 1;
5307         }
5308
5309         if (!err) {
5310                 mddev->array_sectors = sectors;
5311                 if (mddev->pers)
5312                         set_capacity_and_notify(mddev->gendisk,
5313                                                 mddev->array_sectors);
5314         }
5315         mddev_unlock(mddev);
5316         return err ?: len;
5317 }
5318
5319 static struct md_sysfs_entry md_array_size =
5320 __ATTR(array_size, S_IRUGO|S_IWUSR, array_size_show,
5321        array_size_store);
5322
5323 static ssize_t
5324 consistency_policy_show(struct mddev *mddev, char *page)
5325 {
5326         int ret;
5327
5328         if (test_bit(MD_HAS_JOURNAL, &mddev->flags)) {
5329                 ret = sprintf(page, "journal\n");
5330         } else if (test_bit(MD_HAS_PPL, &mddev->flags)) {
5331                 ret = sprintf(page, "ppl\n");
5332         } else if (mddev->bitmap) {
5333                 ret = sprintf(page, "bitmap\n");
5334         } else if (mddev->pers) {
5335                 if (mddev->pers->sync_request)
5336                         ret = sprintf(page, "resync\n");
5337                 else
5338                         ret = sprintf(page, "none\n");
5339         } else {
5340                 ret = sprintf(page, "unknown\n");
5341         }
5342
5343         return ret;
5344 }
5345
5346 static ssize_t
5347 consistency_policy_store(struct mddev *mddev, const char *buf, size_t len)
5348 {
5349         int err = 0;
5350
5351         if (mddev->pers) {
5352                 if (mddev->pers->change_consistency_policy)
5353                         err = mddev->pers->change_consistency_policy(mddev, buf);
5354                 else
5355                         err = -EBUSY;
5356         } else if (mddev->external && strncmp(buf, "ppl", 3) == 0) {
5357                 set_bit(MD_HAS_PPL, &mddev->flags);
5358         } else {
5359                 err = -EINVAL;
5360         }
5361
5362         return err ? err : len;
5363 }
5364
5365 static struct md_sysfs_entry md_consistency_policy =
5366 __ATTR(consistency_policy, S_IRUGO | S_IWUSR, consistency_policy_show,
5367        consistency_policy_store);
5368
5369 static ssize_t fail_last_dev_show(struct mddev *mddev, char *page)
5370 {
5371         return sprintf(page, "%d\n", mddev->fail_last_dev);
5372 }
5373
5374 /*
5375  * Setting fail_last_dev to true allows the last device to be forcibly removed
5376  * from RAID1/RAID10.
5377  */
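/* Editor's note, illustrative usage: echo 1 > /sys/block/md0/md/fail_last_dev */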
5378 static ssize_t
5379 fail_last_dev_store(struct mddev *mddev, const char *buf, size_t len)
5380 {
5381         int ret;
5382         bool value;
5383
5384         ret = kstrtobool(buf, &value);
5385         if (ret)
5386                 return ret;
5387
5388         if (value != mddev->fail_last_dev)
5389                 mddev->fail_last_dev = value;
5390
5391         return len;
5392 }
5393 static struct md_sysfs_entry md_fail_last_dev =
5394 __ATTR(fail_last_dev, S_IRUGO | S_IWUSR, fail_last_dev_show,
5395        fail_last_dev_store);
5396
5397 static ssize_t serialize_policy_show(struct mddev *mddev, char *page)
5398 {
5399         if (mddev->pers == NULL || (mddev->pers->level != 1))
5400                 return sprintf(page, "n/a\n");
5401         else
5402                 return sprintf(page, "%d\n", mddev->serialize_policy);
5403 }
5404
5405 /*
5406  * Setting serialize_policy to true enforces that write IO is not reordered
5407  * for raid1.
5408  */
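/* Editor's note, illustrative usage: echo 1 > /sys/block/md0/md/serialize_policy */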
5409 static ssize_t
5410 serialize_policy_store(struct mddev *mddev, const char *buf, size_t len)
5411 {
5412         int err;
5413         bool value;
5414
5415         err = kstrtobool(buf, &value);
5416         if (err)
5417                 return err;
5418
5419         if (value == mddev->serialize_policy)
5420                 return len;
5421
5422         err = mddev_lock(mddev);
5423         if (err)
5424                 return err;
5425         if (mddev->pers == NULL || (mddev->pers->level != 1)) {
5426                 pr_err("md: serialize_policy is only effective for raid1\n");
5427                 err = -EINVAL;
5428                 goto unlock;
5429         }
5430
5431         mddev_suspend(mddev);
5432         if (value)
5433                 mddev_create_serial_pool(mddev, NULL, true);
5434         else
5435                 mddev_destroy_serial_pool(mddev, NULL, true);
5436         mddev->serialize_policy = value;
5437         mddev_resume(mddev);
5438 unlock:
5439         mddev_unlock(mddev);
5440         return err ?: len;
5441 }
5442
5443 static struct md_sysfs_entry md_serialize_policy =
5444 __ATTR(serialize_policy, S_IRUGO | S_IWUSR, serialize_policy_show,
5445        serialize_policy_store);
5446
5447
5448 static struct attribute *md_default_attrs[] = {
5449         &md_level.attr,
5450         &md_layout.attr,
5451         &md_raid_disks.attr,
5452         &md_uuid.attr,
5453         &md_chunk_size.attr,
5454         &md_size.attr,
5455         &md_resync_start.attr,
5456         &md_metadata.attr,
5457         &md_new_device.attr,
5458         &md_safe_delay.attr,
5459         &md_array_state.attr,
5460         &md_reshape_position.attr,
5461         &md_reshape_direction.attr,
5462         &md_array_size.attr,
5463         &max_corr_read_errors.attr,
5464         &md_consistency_policy.attr,
5465         &md_fail_last_dev.attr,
5466         &md_serialize_policy.attr,
5467         NULL,
5468 };
5469
5470 static const struct attribute_group md_default_group = {
5471         .attrs = md_default_attrs,
5472 };
5473
5474 static struct attribute *md_redundancy_attrs[] = {
5475         &md_scan_mode.attr,
5476         &md_last_scan_mode.attr,
5477         &md_mismatches.attr,
5478         &md_sync_min.attr,
5479         &md_sync_max.attr,
5480         &md_sync_speed.attr,
5481         &md_sync_force_parallel.attr,
5482         &md_sync_completed.attr,
5483         &md_min_sync.attr,
5484         &md_max_sync.attr,
5485         &md_suspend_lo.attr,
5486         &md_suspend_hi.attr,
5487         &md_bitmap.attr,
5488         &md_degraded.attr,
5489         NULL,
5490 };
5491 static const struct attribute_group md_redundancy_group = {
5492         .name = NULL,
5493         .attrs = md_redundancy_attrs,
5494 };
5495
5496 static const struct attribute_group *md_attr_groups[] = {
5497         &md_default_group,
5498         &md_bitmap_group,
5499         NULL,
5500 };
5501
5502 static ssize_t
5503 md_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
5504 {
5505         struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr);
5506         struct mddev *mddev = container_of(kobj, struct mddev, kobj);
5507         ssize_t rv;
5508
5509         if (!entry->show)
5510                 return -EIO;
5511         spin_lock(&all_mddevs_lock);
5512         if (!mddev_get(mddev)) {
5513                 spin_unlock(&all_mddevs_lock);
5514                 return -EBUSY;
5515         }
5516         spin_unlock(&all_mddevs_lock);
5517
5518         rv = entry->show(mddev, page);
5519         mddev_put(mddev);
5520         return rv;
5521 }
5522
5523 static ssize_t
5524 md_attr_store(struct kobject *kobj, struct attribute *attr,
5525               const char *page, size_t length)
5526 {
5527         struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr);
5528         struct mddev *mddev = container_of(kobj, struct mddev, kobj);
5529         ssize_t rv;
5530
5531         if (!entry->store)
5532                 return -EIO;
5533         if (!capable(CAP_SYS_ADMIN))
5534                 return -EACCES;
5535         spin_lock(&all_mddevs_lock);
5536         if (!mddev_get(mddev)) {
5537                 spin_unlock(&all_mddevs_lock);
5538                 return -EBUSY;
5539         }
5540         spin_unlock(&all_mddevs_lock);
5541         rv = entry->store(mddev, page, length);
5542         mddev_put(mddev);
5543         return rv;
5544 }
5545
5546 static void md_kobj_release(struct kobject *ko)
5547 {
5548         struct mddev *mddev = container_of(ko, struct mddev, kobj);
5549
5550         if (mddev->sysfs_state)
5551                 sysfs_put(mddev->sysfs_state);
5552         if (mddev->sysfs_level)
5553                 sysfs_put(mddev->sysfs_level);
5554
5555         del_gendisk(mddev->gendisk);
5556         put_disk(mddev->gendisk);
5557 }
5558
5559 static const struct sysfs_ops md_sysfs_ops = {
5560         .show   = md_attr_show,
5561         .store  = md_attr_store,
5562 };
5563 static const struct kobj_type md_ktype = {
5564         .release        = md_kobj_release,
5565         .sysfs_ops      = &md_sysfs_ops,
5566         .default_groups = md_attr_groups,
5567 };
5568
5569 int mdp_major = 0;
5570
5571 static void mddev_delayed_delete(struct work_struct *ws)
5572 {
5573         struct mddev *mddev = container_of(ws, struct mddev, del_work);
5574
5575         kobject_put(&mddev->kobj);
5576 }
5577
5578 static void no_op(struct percpu_ref *r) {}
5579
5580 int mddev_init_writes_pending(struct mddev *mddev)
5581 {
5582         if (mddev->writes_pending.percpu_count_ptr)
5583                 return 0;
5584         if (percpu_ref_init(&mddev->writes_pending, no_op,
5585                             PERCPU_REF_ALLOW_REINIT, GFP_KERNEL) < 0)
5586                 return -ENOMEM;
5587         /* We want to start with the refcount at zero */
5588         percpu_ref_put(&mddev->writes_pending);
5589         return 0;
5590 }
5591 EXPORT_SYMBOL_GPL(mddev_init_writes_pending);
5592
5593 struct mddev *md_alloc(dev_t dev, char *name)
5594 {
5595         /*
5596          * If dev is zero, name is the name of a device to allocate with
5597          * an arbitrary minor number.  It will be "md_???"
5598          * If dev is non-zero it must be a device number with a MAJOR of
5599          * MD_MAJOR or mdp_major.  In this case, if "name" is NULL, then
5600          * the device is being created by opening a node in /dev.
5601          * If "name" is not NULL, the device is being created by
5602          * writing to /sys/module/md_mod/parameters/new_array.
5603          */
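        /*
         * Editor's note, illustrative only: the /dev path corresponds to
         * opening a node such as /dev/md0 (handled by md_probe() below when
         * the create_on_open module parameter is set), while writing a name
         * such as "md_test" to /sys/module/md_mod/parameters/new_array is
         * handled by add_named_array().
         */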
5604         static DEFINE_MUTEX(disks_mutex);
5605         struct mddev *mddev;
5606         struct gendisk *disk;
5607         int partitioned;
5608         int shift;
5609         int unit;
5610         int error;
5611
5612         /*
5613          * Wait for any previous instance of this device to be completely
5614          * removed (mddev_delayed_delete).
5615          */
5616         flush_workqueue(md_misc_wq);
5617
5618         mutex_lock(&disks_mutex);
5619         mddev = mddev_alloc(dev);
5620         if (IS_ERR(mddev)) {
5621                 error = PTR_ERR(mddev);
5622                 goto out_unlock;
5623         }
5624
5625         partitioned = (MAJOR(mddev->unit) != MD_MAJOR);
5626         shift = partitioned ? MdpMinorShift : 0;
5627         unit = MINOR(mddev->unit) >> shift;
5628
5629         if (name && !dev) {
5630                 /* Need to ensure that 'name' is not a duplicate.
5631                  */
5632                 struct mddev *mddev2;
5633                 spin_lock(&all_mddevs_lock);
5634
5635                 list_for_each_entry(mddev2, &all_mddevs, all_mddevs)
5636                         if (mddev2->gendisk &&
5637                             strcmp(mddev2->gendisk->disk_name, name) == 0) {
5638                                 spin_unlock(&all_mddevs_lock);
5639                                 error = -EEXIST;
5640                                 goto out_free_mddev;
5641                         }
5642                 spin_unlock(&all_mddevs_lock);
5643         }
5644         if (name && dev)
5645                 /*
5646                  * Creating /dev/mdNNN via "new_array", so adjust hold_active.
5647                  */
5648                 mddev->hold_active = UNTIL_STOP;
5649
5650         error = -ENOMEM;
5651         disk = blk_alloc_disk(NUMA_NO_NODE);
5652         if (!disk)
5653                 goto out_free_mddev;
5654
5655         disk->major = MAJOR(mddev->unit);
5656         disk->first_minor = unit << shift;
5657         disk->minors = 1 << shift;
5658         if (name)
5659                 strcpy(disk->disk_name, name);
5660         else if (partitioned)
5661                 sprintf(disk->disk_name, "md_d%d", unit);
5662         else
5663                 sprintf(disk->disk_name, "md%d", unit);
5664         disk->fops = &md_fops;
5665         disk->private_data = mddev;
5666
5667         mddev->queue = disk->queue;
5668         blk_set_stacking_limits(&mddev->queue->limits);
5669         blk_queue_write_cache(mddev->queue, true, true);
5670         disk->events |= DISK_EVENT_MEDIA_CHANGE;
5671         mddev->gendisk = disk;
5672         error = add_disk(disk);
5673         if (error)
5674                 goto out_put_disk;
5675
5676         kobject_init(&mddev->kobj, &md_ktype);
5677         error = kobject_add(&mddev->kobj, &disk_to_dev(disk)->kobj, "%s", "md");
5678         if (error) {
5679                 /*
5680                  * The disk is already live at this point.  Clear the hold flag
5681                  * and let mddev_put take care of the deletion, as it isn't any
5682                  * different from a normal close on last release now.
5683                  */
5684                 mddev->hold_active = 0;
5685                 mutex_unlock(&disks_mutex);
5686                 mddev_put(mddev);
5687                 return ERR_PTR(error);
5688         }
5689
5690         kobject_uevent(&mddev->kobj, KOBJ_ADD);
5691         mddev->sysfs_state = sysfs_get_dirent_safe(mddev->kobj.sd, "array_state");
5692         mddev->sysfs_level = sysfs_get_dirent_safe(mddev->kobj.sd, "level");
5693         mutex_unlock(&disks_mutex);
5694         return mddev;
5695
5696 out_put_disk:
5697         put_disk(disk);
5698 out_free_mddev:
5699         mddev_free(mddev);
5700 out_unlock:
5701         mutex_unlock(&disks_mutex);
5702         return ERR_PTR(error);
5703 }
5704
5705 static int md_alloc_and_put(dev_t dev, char *name)
5706 {
5707         struct mddev *mddev = md_alloc(dev, name);
5708
5709         if (IS_ERR(mddev))
5710                 return PTR_ERR(mddev);
5711         mddev_put(mddev);
5712         return 0;
5713 }
5714
5715 static void md_probe(dev_t dev)
5716 {
5717         if (MAJOR(dev) == MD_MAJOR && MINOR(dev) >= 512)
5718                 return;
5719         if (create_on_open)
5720                 md_alloc_and_put(dev, NULL);
5721 }
5722
5723 static int add_named_array(const char *val, const struct kernel_param *kp)
5724 {
5725         /*
5726          * val must be "md_*" or "mdNNN".
5727          * For "md_*" we allocate an array with a large free minor number, and
5728          * set the name to val.  val must not already be an active name.
5729          * For "mdNNN" we allocate an array with the minor number NNN
5730          * which must not already be in use.
5731          */
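        /*
         * Rough usage sketch (example names only): both forms are normally
         * reached by writing to the module parameter mentioned in md_alloc():
         *
         *     echo md_home > /sys/module/md_mod/parameters/new_array
         *     echo md127   > /sys/module/md_mod/parameters/new_array
         *
         * The first allocates a named array, the second one with minor 127.
         */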
5732         int len = strlen(val);
5733         char buf[DISK_NAME_LEN];
5734         unsigned long devnum;
5735
5736         while (len && val[len-1] == '\n')
5737                 len--;
5738         if (len >= DISK_NAME_LEN)
5739                 return -E2BIG;
5740         strscpy(buf, val, len+1);
5741         if (strncmp(buf, "md_", 3) == 0)
5742                 return md_alloc_and_put(0, buf);
5743         if (strncmp(buf, "md", 2) == 0 &&
5744             isdigit(buf[2]) &&
5745             kstrtoul(buf+2, 10, &devnum) == 0 &&
5746             devnum <= MINORMASK)
5747                 return md_alloc_and_put(MKDEV(MD_MAJOR, devnum), NULL);
5748
5749         return -EINVAL;
5750 }
5751
5752 static void md_safemode_timeout(struct timer_list *t)
5753 {
5754         struct mddev *mddev = from_timer(mddev, t, safemode_timer);
5755
5756         mddev->safemode = 1;
5757         if (mddev->external)
5758                 sysfs_notify_dirent_safe(mddev->sysfs_state);
5759
5760         md_wakeup_thread(mddev->thread);
5761 }
5762
5763 static int start_dirty_degraded;
5764 static void active_io_release(struct percpu_ref *ref)
5765 {
5766         struct mddev *mddev = container_of(ref, struct mddev, active_io);
5767
5768         wake_up(&mddev->sb_wait);
5769 }
5770
5771 int md_run(struct mddev *mddev)
5772 {
5773         int err;
5774         struct md_rdev *rdev;
5775         struct md_personality *pers;
5776         bool nowait = true;
5777
5778         if (list_empty(&mddev->disks))
5779                 /* cannot run an array with no devices.. */
5780                 return -EINVAL;
5781
5782         if (mddev->pers)
5783                 return -EBUSY;
5784         /* Cannot run until previous stop completes properly */
5785         if (mddev->sysfs_active)
5786                 return -EBUSY;
5787
5788         /*
5789          * Analyze all RAID superblock(s)
5790          */
5791         if (!mddev->raid_disks) {
5792                 if (!mddev->persistent)
5793                         return -EINVAL;
5794                 err = analyze_sbs(mddev);
5795                 if (err)
5796                         return -EINVAL;
5797         }
5798
5799         if (mddev->level != LEVEL_NONE)
5800                 request_module("md-level-%d", mddev->level);
5801         else if (mddev->clevel[0])
5802                 request_module("md-%s", mddev->clevel);
5803
5804         /*
5805          * Drop all container device buffers, from now on
5806          * the only valid external interface is through the md
5807          * device.
5808          */
5809         mddev->has_superblocks = false;
5810         rdev_for_each(rdev, mddev) {
5811                 if (test_bit(Faulty, &rdev->flags))
5812                         continue;
5813                 sync_blockdev(rdev->bdev);
5814                 invalidate_bdev(rdev->bdev);
5815                 if (mddev->ro != MD_RDONLY && rdev_read_only(rdev)) {
5816                         mddev->ro = MD_RDONLY;
5817                         if (mddev->gendisk)
5818                                 set_disk_ro(mddev->gendisk, 1);
5819                 }
5820
5821                 if (rdev->sb_page)
5822                         mddev->has_superblocks = true;
5823
5824                 /* perform some consistency tests on the device.
5825                  * We don't want the data to overlap the metadata.
5826                  * Internal Bitmap issues have been handled elsewhere.
5827                  */
5828                 if (rdev->meta_bdev) {
5829                         /* Nothing to check */;
5830                 } else if (rdev->data_offset < rdev->sb_start) {
5831                         if (mddev->dev_sectors &&
5832                             rdev->data_offset + mddev->dev_sectors
5833                             > rdev->sb_start) {
5834                                 pr_warn("md: %s: data overlaps metadata\n",
5835                                         mdname(mddev));
5836                                 return -EINVAL;
5837                         }
5838                 } else {
5839                         if (rdev->sb_start + rdev->sb_size/512
5840                             > rdev->data_offset) {
5841                                 pr_warn("md: %s: metadata overlaps data\n",
5842                                         mdname(mddev));
5843                                 return -EINVAL;
5844                         }
5845                 }
5846                 sysfs_notify_dirent_safe(rdev->sysfs_state);
5847                 nowait = nowait && bdev_nowait(rdev->bdev);
5848         }
5849
5850         err = percpu_ref_init(&mddev->active_io, active_io_release,
5851                                 PERCPU_REF_ALLOW_REINIT, GFP_KERNEL);
5852         if (err)
5853                 return err;
5854
5855         if (!bioset_initialized(&mddev->bio_set)) {
5856                 err = bioset_init(&mddev->bio_set, BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS);
5857                 if (err)
5858                         goto exit_active_io;
5859         }
5860         if (!bioset_initialized(&mddev->sync_set)) {
5861                 err = bioset_init(&mddev->sync_set, BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS);
5862                 if (err)
5863                         goto exit_bio_set;
5864         }
5865
5866         spin_lock(&pers_lock);
5867         pers = find_pers(mddev->level, mddev->clevel);
5868         if (!pers || !try_module_get(pers->owner)) {
5869                 spin_unlock(&pers_lock);
5870                 if (mddev->level != LEVEL_NONE)
5871                         pr_warn("md: personality for level %d is not loaded!\n",
5872                                 mddev->level);
5873                 else
5874                         pr_warn("md: personality for level %s is not loaded!\n",
5875                                 mddev->clevel);
5876                 err = -EINVAL;
5877                 goto abort;
5878         }
5879         spin_unlock(&pers_lock);
5880         if (mddev->level != pers->level) {
5881                 mddev->level = pers->level;
5882                 mddev->new_level = pers->level;
5883         }
5884         strscpy(mddev->clevel, pers->name, sizeof(mddev->clevel));
5885
5886         if (mddev->reshape_position != MaxSector &&
5887             pers->start_reshape == NULL) {
5888                 /* This personality cannot handle reshaping... */
5889                 module_put(pers->owner);
5890                 err = -EINVAL;
5891                 goto abort;
5892         }
5893
5894         if (pers->sync_request) {
5895                 /* Warn if this is a potentially silly
5896                  * configuration.
5897                  */
5898                 struct md_rdev *rdev2;
5899                 int warned = 0;
5900
5901                 rdev_for_each(rdev, mddev)
5902                         rdev_for_each(rdev2, mddev) {
5903                                 if (rdev < rdev2 &&
5904                                     rdev->bdev->bd_disk ==
5905                                     rdev2->bdev->bd_disk) {
5906                                         pr_warn("%s: WARNING: %pg appears to be on the same physical disk as %pg.\n",
5907                                                 mdname(mddev),
5908                                                 rdev->bdev,
5909                                                 rdev2->bdev);
5910                                         warned = 1;
5911                                 }
5912                         }
5913
5914                 if (warned)
5915                         pr_warn("True protection against single-disk failure might be compromised.\n");
5916         }
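        /*
         * A typical case that trips the warning above: two partitions of one
         * drive, say /dev/sda1 and /dev/sda2, used as members of the same
         * RAID1.  Both rdevs share bd_disk, so the redundancy does not
         * survive the loss of that single drive (device names are examples).
         */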
5917
5918         mddev->recovery = 0;
5919         /* may be over-ridden by personality */
5920         mddev->resync_max_sectors = mddev->dev_sectors;
5921
5922         mddev->ok_start_degraded = start_dirty_degraded;
5923
5924         if (start_readonly && md_is_rdwr(mddev))
5925                 mddev->ro = MD_AUTO_READ; /* read-only, but switch on first write */
5926
5927         err = pers->run(mddev);
5928         if (err)
5929                 pr_warn("md: pers->run() failed ...\n");
5930         else if (pers->size(mddev, 0, 0) < mddev->array_sectors) {
5931                 WARN_ONCE(!mddev->external_size,
5932                           "%s: default size too small, but 'external_size' not in effect?\n",
5933                           __func__);
5934                 pr_warn("md: invalid array_size %llu > default size %llu\n",
5935                         (unsigned long long)mddev->array_sectors / 2,
5936                         (unsigned long long)pers->size(mddev, 0, 0) / 2);
5937                 err = -EINVAL;
5938         }
5939         if (err == 0 && pers->sync_request &&
5940             (mddev->bitmap_info.file || mddev->bitmap_info.offset)) {
5941                 struct bitmap *bitmap;
5942
5943                 bitmap = md_bitmap_create(mddev, -1);
5944                 if (IS_ERR(bitmap)) {
5945                         err = PTR_ERR(bitmap);
5946                         pr_warn("%s: failed to create bitmap (%d)\n",
5947                                 mdname(mddev), err);
5948                 } else
5949                         mddev->bitmap = bitmap;
5950
5951         }
5952         if (err)
5953                 goto bitmap_abort;
5954
5955         if (mddev->bitmap_info.max_write_behind > 0) {
5956                 bool create_pool = false;
5957
5958                 rdev_for_each(rdev, mddev) {
5959                         if (test_bit(WriteMostly, &rdev->flags) &&
5960                             rdev_init_serial(rdev))
5961                                 create_pool = true;
5962                 }
5963                 if (create_pool && mddev->serial_info_pool == NULL) {
5964                         mddev->serial_info_pool =
5965                                 mempool_create_kmalloc_pool(NR_SERIAL_INFOS,
5966                                                     sizeof(struct serial_info));
5967                         if (!mddev->serial_info_pool) {
5968                                 err = -ENOMEM;
5969                                 goto bitmap_abort;
5970                         }
5971                 }
5972         }
5973
5974         if (mddev->queue) {
5975                 bool nonrot = true;
5976
5977                 rdev_for_each(rdev, mddev) {
5978                         if (rdev->raid_disk >= 0 && !bdev_nonrot(rdev->bdev)) {
5979                                 nonrot = false;
5980                                 break;
5981                         }
5982                 }
5983                 if (mddev->degraded)
5984                         nonrot = false;
5985                 if (nonrot)
5986                         blk_queue_flag_set(QUEUE_FLAG_NONROT, mddev->queue);
5987                 else
5988                         blk_queue_flag_clear(QUEUE_FLAG_NONROT, mddev->queue);
5989                 blk_queue_flag_set(QUEUE_FLAG_IO_STAT, mddev->queue);
5990
5991                 /* Set the NOWAIT flags if all underlying devices support it */
5992                 if (nowait)
5993                         blk_queue_flag_set(QUEUE_FLAG_NOWAIT, mddev->queue);
5994         }
5995         if (pers->sync_request) {
5996                 if (mddev->kobj.sd &&
5997                     sysfs_create_group(&mddev->kobj, &md_redundancy_group))
5998                         pr_warn("md: cannot register extra attributes for %s\n",
5999                                 mdname(mddev));
6000                 mddev->sysfs_action = sysfs_get_dirent_safe(mddev->kobj.sd, "sync_action");
6001                 mddev->sysfs_completed = sysfs_get_dirent_safe(mddev->kobj.sd, "sync_completed");
6002                 mddev->sysfs_degraded = sysfs_get_dirent_safe(mddev->kobj.sd, "degraded");
6003         } else if (mddev->ro == MD_AUTO_READ)
6004                 mddev->ro = MD_RDWR;
6005
6006         atomic_set(&mddev->max_corr_read_errors,
6007                    MD_DEFAULT_MAX_CORRECTED_READ_ERRORS);
6008         mddev->safemode = 0;
6009         if (mddev_is_clustered(mddev))
6010                 mddev->safemode_delay = 0;
6011         else
6012                 mddev->safemode_delay = DEFAULT_SAFEMODE_DELAY;
6013         mddev->in_sync = 1;
6014         smp_wmb();
6015         spin_lock(&mddev->lock);
6016         mddev->pers = pers;
6017         spin_unlock(&mddev->lock);
6018         rdev_for_each(rdev, mddev)
6019                 if (rdev->raid_disk >= 0)
6020                         sysfs_link_rdev(mddev, rdev); /* failure here is OK */
6021
6022         if (mddev->degraded && md_is_rdwr(mddev))
6023                 /* This ensures that recovering status is reported immediately
6024                  * via sysfs - until a lack of spares is confirmed.
6025                  */
6026                 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
6027         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
6028
6029         if (mddev->sb_flags)
6030                 md_update_sb(mddev, 0);
6031
6032         md_new_event();
6033         return 0;
6034
6035 bitmap_abort:
6036         mddev_detach(mddev);
6037         if (mddev->private)
6038                 pers->free(mddev, mddev->private);
6039         mddev->private = NULL;
6040         module_put(pers->owner);
6041         md_bitmap_destroy(mddev);
6042 abort:
6043         bioset_exit(&mddev->sync_set);
6044 exit_bio_set:
6045         bioset_exit(&mddev->bio_set);
6046 exit_active_io:
6047         percpu_ref_exit(&mddev->active_io);
6048         return err;
6049 }
6050 EXPORT_SYMBOL_GPL(md_run);
6051
6052 int do_md_run(struct mddev *mddev)
6053 {
6054         int err;
6055
6056         set_bit(MD_NOT_READY, &mddev->flags);
6057         err = md_run(mddev);
6058         if (err)
6059                 goto out;
6060         err = md_bitmap_load(mddev);
6061         if (err) {
6062                 md_bitmap_destroy(mddev);
6063                 goto out;
6064         }
6065
6066         if (mddev_is_clustered(mddev))
6067                 md_allow_write(mddev);
6068
6069         /* run start up tasks that require md_thread */
6070         md_start(mddev);
6071
6072         md_wakeup_thread(mddev->thread);
6073         md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */
6074
6075         set_capacity_and_notify(mddev->gendisk, mddev->array_sectors);
6076         clear_bit(MD_NOT_READY, &mddev->flags);
6077         mddev->changed = 1;
6078         kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE);
6079         sysfs_notify_dirent_safe(mddev->sysfs_state);
6080         sysfs_notify_dirent_safe(mddev->sysfs_action);
6081         sysfs_notify_dirent_safe(mddev->sysfs_degraded);
6082 out:
6083         clear_bit(MD_NOT_READY, &mddev->flags);
6084         return err;
6085 }
6086
6087 int md_start(struct mddev *mddev)
6088 {
6089         int ret = 0;
6090
6091         if (mddev->pers->start) {
6092                 set_bit(MD_RECOVERY_WAIT, &mddev->recovery);
6093                 md_wakeup_thread(mddev->thread);
6094                 ret = mddev->pers->start(mddev);
6095                 clear_bit(MD_RECOVERY_WAIT, &mddev->recovery);
6096                 md_wakeup_thread(mddev->sync_thread);
6097         }
6098         return ret;
6099 }
6100 EXPORT_SYMBOL_GPL(md_start);
6101
6102 static int restart_array(struct mddev *mddev)
6103 {
6104         struct gendisk *disk = mddev->gendisk;
6105         struct md_rdev *rdev;
6106         bool has_journal = false;
6107         bool has_readonly = false;
6108
6109         /* Complain if it has no devices */
6110         if (list_empty(&mddev->disks))
6111                 return -ENXIO;
6112         if (!mddev->pers)
6113                 return -EINVAL;
6114         if (md_is_rdwr(mddev))
6115                 return -EBUSY;
6116
6117         rcu_read_lock();
6118         rdev_for_each_rcu(rdev, mddev) {
6119                 if (test_bit(Journal, &rdev->flags) &&
6120                     !test_bit(Faulty, &rdev->flags))
6121                         has_journal = true;
6122                 if (rdev_read_only(rdev))
6123                         has_readonly = true;
6124         }
6125         rcu_read_unlock();
6126         if (test_bit(MD_HAS_JOURNAL, &mddev->flags) && !has_journal)
6127                 /* Don't restart rw with journal missing/faulty */
6128                 return -EINVAL;
6129         if (has_readonly)
6130                 return -EROFS;
6131
6132         mddev->safemode = 0;
6133         mddev->ro = MD_RDWR;
6134         set_disk_ro(disk, 0);
6135         pr_debug("md: %s switched to read-write mode.\n", mdname(mddev));
6136         /* Kick recovery or resync if necessary */
6137         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
6138         md_wakeup_thread(mddev->thread);
6139         md_wakeup_thread(mddev->sync_thread);
6140         sysfs_notify_dirent_safe(mddev->sysfs_state);
6141         return 0;
6142 }
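/*
 * For illustration: restart_array() is the path taken when a read-only,
 * already-assembled array is switched back to read-write, typically via
 * something like (assuming a stock mdadm):
 *
 *     mdadm --readwrite /dev/md0
 *
 * It refuses if any member is itself read-only, or if a required journal
 * device is missing or faulty, per the checks above.
 */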
6143
6144 static void md_clean(struct mddev *mddev)
6145 {
6146         mddev->array_sectors = 0;
6147         mddev->external_size = 0;
6148         mddev->dev_sectors = 0;
6149         mddev->raid_disks = 0;
6150         mddev->recovery_cp = 0;
6151         mddev->resync_min = 0;
6152         mddev->resync_max = MaxSector;
6153         mddev->reshape_position = MaxSector;
6154         /* we still need mddev->external in export_rdev, do not clear it yet */
6155         mddev->persistent = 0;
6156         mddev->level = LEVEL_NONE;
6157         mddev->clevel[0] = 0;
6158         mddev->flags = 0;
6159         mddev->sb_flags = 0;
6160         mddev->ro = MD_RDWR;
6161         mddev->metadata_type[0] = 0;
6162         mddev->chunk_sectors = 0;
6163         mddev->ctime = mddev->utime = 0;
6164         mddev->layout = 0;
6165         mddev->max_disks = 0;
6166         mddev->events = 0;
6167         mddev->can_decrease_events = 0;
6168         mddev->delta_disks = 0;
6169         mddev->reshape_backwards = 0;
6170         mddev->new_level = LEVEL_NONE;
6171         mddev->new_layout = 0;
6172         mddev->new_chunk_sectors = 0;
6173         mddev->curr_resync = MD_RESYNC_NONE;
6174         atomic64_set(&mddev->resync_mismatches, 0);
6175         mddev->suspend_lo = mddev->suspend_hi = 0;
6176         mddev->sync_speed_min = mddev->sync_speed_max = 0;
6177         mddev->recovery = 0;
6178         mddev->in_sync = 0;
6179         mddev->changed = 0;
6180         mddev->degraded = 0;
6181         mddev->safemode = 0;
6182         mddev->private = NULL;
6183         mddev->cluster_info = NULL;
6184         mddev->bitmap_info.offset = 0;
6185         mddev->bitmap_info.default_offset = 0;
6186         mddev->bitmap_info.default_space = 0;
6187         mddev->bitmap_info.chunksize = 0;
6188         mddev->bitmap_info.daemon_sleep = 0;
6189         mddev->bitmap_info.max_write_behind = 0;
6190         mddev->bitmap_info.nodes = 0;
6191 }
6192
6193 static void __md_stop_writes(struct mddev *mddev)
6194 {
6195         set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
6196         if (work_pending(&mddev->del_work))
6197                 flush_workqueue(md_misc_wq);
6198         if (mddev->sync_thread) {
6199                 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
6200                 md_reap_sync_thread(mddev);
6201         }
6202
6203         del_timer_sync(&mddev->safemode_timer);
6204
6205         if (mddev->pers && mddev->pers->quiesce) {
6206                 mddev->pers->quiesce(mddev, 1);
6207                 mddev->pers->quiesce(mddev, 0);
6208         }
6209         md_bitmap_flush(mddev);
6210
6211         if (md_is_rdwr(mddev) &&
6212             ((!mddev->in_sync && !mddev_is_clustered(mddev)) ||
6213              mddev->sb_flags)) {
6214                 /* mark array as shutdown cleanly */
6215                 if (!mddev_is_clustered(mddev))
6216                         mddev->in_sync = 1;
6217                 md_update_sb(mddev, 1);
6218         }
6219         /* disable policy to guarantee rdevs free resources for serialization */
6220         mddev->serialize_policy = 0;
6221         mddev_destroy_serial_pool(mddev, NULL, true);
6222 }
6223
6224 void md_stop_writes(struct mddev *mddev)
6225 {
6226         mddev_lock_nointr(mddev);
6227         __md_stop_writes(mddev);
6228         mddev_unlock(mddev);
6229 }
6230 EXPORT_SYMBOL_GPL(md_stop_writes);
6231
6232 static void mddev_detach(struct mddev *mddev)
6233 {
6234         md_bitmap_wait_behind_writes(mddev);
6235         if (mddev->pers && mddev->pers->quiesce && !is_md_suspended(mddev)) {
6236                 mddev->pers->quiesce(mddev, 1);
6237                 mddev->pers->quiesce(mddev, 0);
6238         }
6239         md_unregister_thread(&mddev->thread);
6240         if (mddev->queue)
6241                 blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/
6242 }
6243
6244 static void __md_stop(struct mddev *mddev)
6245 {
6246         struct md_personality *pers = mddev->pers;
6247         md_bitmap_destroy(mddev);
6248         mddev_detach(mddev);
6249         /* Ensure ->event_work is done */
6250         if (mddev->event_work.func)
6251                 flush_workqueue(md_misc_wq);
6252         spin_lock(&mddev->lock);
6253         mddev->pers = NULL;
6254         spin_unlock(&mddev->lock);
6255         if (mddev->private)
6256                 pers->free(mddev, mddev->private);
6257         mddev->private = NULL;
6258         if (pers->sync_request && mddev->to_remove == NULL)
6259                 mddev->to_remove = &md_redundancy_group;
6260         module_put(pers->owner);
6261         clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
6262
6263         percpu_ref_exit(&mddev->active_io);
6264         bioset_exit(&mddev->bio_set);
6265         bioset_exit(&mddev->sync_set);
6266 }
6267
6268 void md_stop(struct mddev *mddev)
6269 {
6270         /* Stop the array and free any attached data structures.
6271          * This is called from dm-raid.
6272          */
6273         __md_stop_writes(mddev);
6274         __md_stop(mddev);
6275         percpu_ref_exit(&mddev->writes_pending);
6276 }
6277
6278 EXPORT_SYMBOL_GPL(md_stop);
6279
6280 static int md_set_readonly(struct mddev *mddev, struct block_device *bdev)
6281 {
6282         int err = 0;
6283         int did_freeze = 0;
6284
6285         if (!test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) {
6286                 did_freeze = 1;
6287                 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
6288                 md_wakeup_thread(mddev->thread);
6289         }
6290         if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
6291                 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
6292
6293         /*
6294          * Thread might be blocked waiting for metadata update which will now
6295          * never happen
6296          */
6297         md_wakeup_thread_directly(mddev->sync_thread);
6298
6299         if (mddev->external && test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags))
6300                 return -EBUSY;
6301         mddev_unlock(mddev);
6302         wait_event(resync_wait, !test_bit(MD_RECOVERY_RUNNING,
6303                                           &mddev->recovery));
6304         wait_event(mddev->sb_wait,
6305                    !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags));
6306         mddev_lock_nointr(mddev);
6307
6308         mutex_lock(&mddev->open_mutex);
6309         if ((mddev->pers && atomic_read(&mddev->openers) > !!bdev) ||
6310             mddev->sync_thread ||
6311             test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) {
6312                 pr_warn("md: %s still in use.\n", mdname(mddev));
6313                 if (did_freeze) {
6314                         clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
6315                         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
6316                         md_wakeup_thread(mddev->thread);
6317                 }
6318                 err = -EBUSY;
6319                 goto out;
6320         }
6321         if (mddev->pers) {
6322                 __md_stop_writes(mddev);
6323
6324                 err = -ENXIO;
6325                 if (mddev->ro == MD_RDONLY)
6326                         goto out;
6327                 mddev->ro = MD_RDONLY;
6328                 set_disk_ro(mddev->gendisk, 1);
6329                 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
6330                 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
6331                 md_wakeup_thread(mddev->thread);
6332                 sysfs_notify_dirent_safe(mddev->sysfs_state);
6333                 err = 0;
6334         }
6335 out:
6336         mutex_unlock(&mddev->open_mutex);
6337         return err;
6338 }
6339
6340 /* mode:
6341  *   0 - completely stop and disassemble array
6342  *   2 - stop but do not disassemble array
6343  */
6344 static int do_md_stop(struct mddev *mddev, int mode,
6345                       struct block_device *bdev)
6346 {
6347         struct gendisk *disk = mddev->gendisk;
6348         struct md_rdev *rdev;
6349         int did_freeze = 0;
6350
6351         if (!test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) {
6352                 did_freeze = 1;
6353                 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
6354                 md_wakeup_thread(mddev->thread);
6355         }
6356         if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
6357                 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
6358
6359         /*
6360          * Thread might be blocked waiting for metadata update which will now
6361          * never happen
6362          */
6363         md_wakeup_thread_directly(mddev->sync_thread);
6364
6365         mddev_unlock(mddev);
6366         wait_event(resync_wait, (mddev->sync_thread == NULL &&
6367                                  !test_bit(MD_RECOVERY_RUNNING,
6368                                            &mddev->recovery)));
6369         mddev_lock_nointr(mddev);
6370
6371         mutex_lock(&mddev->open_mutex);
6372         if ((mddev->pers && atomic_read(&mddev->openers) > !!bdev) ||
6373             mddev->sysfs_active ||
6374             mddev->sync_thread ||
6375             test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) {
6376                 pr_warn("md: %s still in use.\n", mdname(mddev));
6377                 mutex_unlock(&mddev->open_mutex);
6378                 if (did_freeze) {
6379                         clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
6380                         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
6381                         md_wakeup_thread(mddev->thread);
6382                 }
6383                 return -EBUSY;
6384         }
6385         if (mddev->pers) {
6386                 if (!md_is_rdwr(mddev))
6387                         set_disk_ro(disk, 0);
6388
6389                 __md_stop_writes(mddev);
6390                 __md_stop(mddev);
6391
6392                 /* tell userspace to handle 'inactive' */
6393                 sysfs_notify_dirent_safe(mddev->sysfs_state);
6394
6395                 rdev_for_each(rdev, mddev)
6396                         if (rdev->raid_disk >= 0)
6397                                 sysfs_unlink_rdev(mddev, rdev);
6398
6399                 set_capacity_and_notify(disk, 0);
6400                 mutex_unlock(&mddev->open_mutex);
6401                 mddev->changed = 1;
6402
6403                 if (!md_is_rdwr(mddev))
6404                         mddev->ro = MD_RDWR;
6405         } else
6406                 mutex_unlock(&mddev->open_mutex);
6407         /*
6408          * Free resources if final stop
6409          */
6410         if (mode == 0) {
6411                 pr_info("md: %s stopped.\n", mdname(mddev));
6412
6413                 if (mddev->bitmap_info.file) {
6414                         struct file *f = mddev->bitmap_info.file;
6415                         spin_lock(&mddev->lock);
6416                         mddev->bitmap_info.file = NULL;
6417                         spin_unlock(&mddev->lock);
6418                         fput(f);
6419                 }
6420                 mddev->bitmap_info.offset = 0;
6421
6422                 export_array(mddev);
6423
6424                 md_clean(mddev);
6425                 if (mddev->hold_active == UNTIL_STOP)
6426                         mddev->hold_active = 0;
6427         }
6428         md_new_event();
6429         sysfs_notify_dirent_safe(mddev->sysfs_state);
6430         return 0;
6431 }
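/*
 * A rough sketch of how the two 'mode' values are usually reached (the
 * sysfs/ioctl mapping is indicative, not exhaustive):
 *
 *     mdadm --stop /dev/md0 (STOP_ARRAY ioctl)  -> do_md_stop(mddev, 0, ...)
 *     writing "inactive" to md/array_state      -> do_md_stop(mddev, 2, ...)
 *
 * Only mode 0 goes on to export every member device and reset the mddev via
 * md_clean().
 */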
6432
6433 #ifndef MODULE
6434 static void autorun_array(struct mddev *mddev)
6435 {
6436         struct md_rdev *rdev;
6437         int err;
6438
6439         if (list_empty(&mddev->disks))
6440                 return;
6441
6442         pr_info("md: running: ");
6443
6444         rdev_for_each(rdev, mddev) {
6445                 pr_cont("<%pg>", rdev->bdev);
6446         }
6447         pr_cont("\n");
6448
6449         err = do_md_run(mddev);
6450         if (err) {
6451                 pr_warn("md: do_md_run() returned %d\n", err);
6452                 do_md_stop(mddev, 0, NULL);
6453         }
6454 }
6455
6456 /*
6457  * let's try to run arrays based on all disks that have arrived
6458  * until now. (those are in pending_raid_disks)
6459  *
6460  * the method: pick the first pending disk, collect all disks with
6461  * the same UUID, remove all from the pending list and put them into
6462  * the 'same_array' list. Then order this list based on superblock
6463  * update time (freshest comes first), kick out 'old' disks and
6464  * compare superblocks. If everything's fine then run it.
6465  *
6466  * If "unit" is allocated, then bump its reference count
6467  */
6468 static void autorun_devices(int part)
6469 {
6470         struct md_rdev *rdev0, *rdev, *tmp;
6471         struct mddev *mddev;
6472
6473         pr_info("md: autorun ...\n");
6474         while (!list_empty(&pending_raid_disks)) {
6475                 int unit;
6476                 dev_t dev;
6477                 LIST_HEAD(candidates);
6478                 rdev0 = list_entry(pending_raid_disks.next,
6479                                          struct md_rdev, same_set);
6480
6481                 pr_debug("md: considering %pg ...\n", rdev0->bdev);
6482                 INIT_LIST_HEAD(&candidates);
6483                 rdev_for_each_list(rdev, tmp, &pending_raid_disks)
6484                         if (super_90_load(rdev, rdev0, 0) >= 0) {
6485                                 pr_debug("md:  adding %pg ...\n",
6486                                          rdev->bdev);
6487                                 list_move(&rdev->same_set, &candidates);
6488                         }
6489                 /*
6490                  * now we have a set of devices, with all of them having
6491                  * mostly sane superblocks. It's time to allocate the
6492                  * mddev.
6493                  */
6494                 if (part) {
6495                         dev = MKDEV(mdp_major,
6496                                     rdev0->preferred_minor << MdpMinorShift);
6497                         unit = MINOR(dev) >> MdpMinorShift;
6498                 } else {
6499                         dev = MKDEV(MD_MAJOR, rdev0->preferred_minor);
6500                         unit = MINOR(dev);
6501                 }
6502                 if (rdev0->preferred_minor != unit) {
6503                         pr_warn("md: unit number in %pg is bad: %d\n",
6504                                 rdev0->bdev, rdev0->preferred_minor);
6505                         break;
6506                 }
6507
6508                 mddev = md_alloc(dev, NULL);
6509                 if (IS_ERR(mddev))
6510                         break;
6511
6512                 if (mddev_lock(mddev))
6513                         pr_warn("md: %s locked, cannot run\n", mdname(mddev));
6514                 else if (mddev->raid_disks || mddev->major_version
6515                          || !list_empty(&mddev->disks)) {
6516                         pr_warn("md: %s already running, cannot run %pg\n",
6517                                 mdname(mddev), rdev0->bdev);
6518                         mddev_unlock(mddev);
6519                 } else {
6520                         pr_debug("md: created %s\n", mdname(mddev));
6521                         mddev->persistent = 1;
6522                         rdev_for_each_list(rdev, tmp, &candidates) {
6523                                 list_del_init(&rdev->same_set);
6524                                 if (bind_rdev_to_array(rdev, mddev))
6525                                         export_rdev(rdev, mddev);
6526                         }
6527                         autorun_array(mddev);
6528                         mddev_unlock(mddev);
6529                 }
6530                 /* on success, candidates will be empty, on error
6531                  * it won't...
6532                  */
6533                 rdev_for_each_list(rdev, tmp, &candidates) {
6534                         list_del_init(&rdev->same_set);
6535                         export_rdev(rdev, mddev);
6536                 }
6537                 mddev_put(mddev);
6538         }
6539         pr_info("md: ... autorun DONE.\n");
6540 }
6541 #endif /* !MODULE */
6542
6543 static int get_version(void __user *arg)
6544 {
6545         mdu_version_t ver;
6546
6547         ver.major = MD_MAJOR_VERSION;
6548         ver.minor = MD_MINOR_VERSION;
6549         ver.patchlevel = MD_PATCHLEVEL_VERSION;
6550
6551         if (copy_to_user(arg, &ver, sizeof(ver)))
6552                 return -EFAULT;
6553
6554         return 0;
6555 }
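/*
 * Minimal userspace sketch for the RAID_VERSION ioctl served above (assumes
 * <sys/ioctl.h>, <linux/raid/md_u.h> and an open md device fd):
 *
 *     mdu_version_t ver;
 *     if (ioctl(fd, RAID_VERSION, &ver) == 0)
 *             printf("md driver %d.%d.%d\n", ver.major, ver.minor, ver.patchlevel);
 */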
6556
6557 static int get_array_info(struct mddev *mddev, void __user *arg)
6558 {
6559         mdu_array_info_t info;
6560         int nr, working, insync, failed, spare;
6561         struct md_rdev *rdev;
6562
6563         nr = working = insync = failed = spare = 0;
6564         rcu_read_lock();
6565         rdev_for_each_rcu(rdev, mddev) {
6566                 nr++;
6567                 if (test_bit(Faulty, &rdev->flags))
6568                         failed++;
6569                 else {
6570                         working++;
6571                         if (test_bit(In_sync, &rdev->flags))
6572                                 insync++;
6573                         else if (test_bit(Journal, &rdev->flags))
6574                                 /* TODO: add journal count to md_u.h */
6575                                 ;
6576                         else
6577                                 spare++;
6578                 }
6579         }
6580         rcu_read_unlock();
6581
6582         info.major_version = mddev->major_version;
6583         info.minor_version = mddev->minor_version;
6584         info.patch_version = MD_PATCHLEVEL_VERSION;
6585         info.ctime         = clamp_t(time64_t, mddev->ctime, 0, U32_MAX);
6586         info.level         = mddev->level;
6587         info.size          = mddev->dev_sectors / 2;
6588         if (info.size != mddev->dev_sectors / 2) /* overflow */
6589                 info.size = -1;
6590         info.nr_disks      = nr;
6591         info.raid_disks    = mddev->raid_disks;
6592         info.md_minor      = mddev->md_minor;
6593         info.not_persistent= !mddev->persistent;
6594
6595         info.utime         = clamp_t(time64_t, mddev->utime, 0, U32_MAX);
6596         info.state         = 0;
6597         if (mddev->in_sync)
6598                 info.state = (1<<MD_SB_CLEAN);
6599         if (mddev->bitmap && mddev->bitmap_info.offset)
6600                 info.state |= (1<<MD_SB_BITMAP_PRESENT);
6601         if (mddev_is_clustered(mddev))
6602                 info.state |= (1<<MD_SB_CLUSTERED);
6603         info.active_disks  = insync;
6604         info.working_disks = working;
6605         info.failed_disks  = failed;
6606         info.spare_disks   = spare;
6607
6608         info.layout        = mddev->layout;
6609         info.chunk_size    = mddev->chunk_sectors << 9;
6610
6611         if (copy_to_user(arg, &info, sizeof(info)))
6612                 return -EFAULT;
6613
6614         return 0;
6615 }
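/*
 * Caller's-eye sketch of the structure filled in above (GET_ARRAY_INFO):
 *
 *     mdu_array_info_t info;
 *     if (ioctl(fd, GET_ARRAY_INFO, &info) == 0)
 *             printf("level %d, %d KiB per device\n", info.level, info.size);
 *
 * Note that 'size' is reported in KiB (dev_sectors / 2) and becomes -1 on
 * overflow, and ctime/utime are clamped to 32 bits, so large values do not
 * round-trip through this interface.
 */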
6616
6617 static int get_bitmap_file(struct mddev *mddev, void __user * arg)
6618 {
6619         mdu_bitmap_file_t *file = NULL; /* too big for stack allocation */
6620         char *ptr;
6621         int err;
6622
6623         file = kzalloc(sizeof(*file), GFP_NOIO);
6624         if (!file)
6625                 return -ENOMEM;
6626
6627         err = 0;
6628         spin_lock(&mddev->lock);
6629         /* bitmap enabled */
6630         if (mddev->bitmap_info.file) {
6631                 ptr = file_path(mddev->bitmap_info.file, file->pathname,
6632                                 sizeof(file->pathname));
6633                 if (IS_ERR(ptr))
6634                         err = PTR_ERR(ptr);
6635                 else
6636                         memmove(file->pathname, ptr,
6637                                 sizeof(file->pathname)-(ptr-file->pathname));
6638         }
6639         spin_unlock(&mddev->lock);
6640
6641         if (err == 0 &&
6642             copy_to_user(arg, file, sizeof(*file)))
6643                 err = -EFAULT;
6644
6645         kfree(file);
6646         return err;
6647 }
6648
6649 static int get_disk_info(struct mddev *mddev, void __user * arg)
6650 {
6651         mdu_disk_info_t info;
6652         struct md_rdev *rdev;
6653
6654         if (copy_from_user(&info, arg, sizeof(info)))
6655                 return -EFAULT;
6656
6657         rcu_read_lock();
6658         rdev = md_find_rdev_nr_rcu(mddev, info.number);
6659         if (rdev) {
6660                 info.major = MAJOR(rdev->bdev->bd_dev);
6661                 info.minor = MINOR(rdev->bdev->bd_dev);
6662                 info.raid_disk = rdev->raid_disk;
6663                 info.state = 0;
6664                 if (test_bit(Faulty, &rdev->flags))
6665                         info.state |= (1<<MD_DISK_FAULTY);
6666                 else if (test_bit(In_sync, &rdev->flags)) {
6667                         info.state |= (1<<MD_DISK_ACTIVE);
6668                         info.state |= (1<<MD_DISK_SYNC);
6669                 }
6670                 if (test_bit(Journal, &rdev->flags))
6671                         info.state |= (1<<MD_DISK_JOURNAL);
6672                 if (test_bit(WriteMostly, &rdev->flags))
6673                         info.state |= (1<<MD_DISK_WRITEMOSTLY);
6674                 if (test_bit(FailFast, &rdev->flags))
6675                         info.state |= (1<<MD_DISK_FAILFAST);
6676         } else {
6677                 info.major = info.minor = 0;
6678                 info.raid_disk = -1;
6679                 info.state = (1<<MD_DISK_REMOVED);
6680         }
6681         rcu_read_unlock();
6682
6683         if (copy_to_user(arg, &info, sizeof(info)))
6684                 return -EFAULT;
6685
6686         return 0;
6687 }
6688
6689 int md_add_new_disk(struct mddev *mddev, struct mdu_disk_info_s *info)
6690 {
6691         struct md_rdev *rdev;
6692         dev_t dev = MKDEV(info->major, info->minor);
6693
6694         if (mddev_is_clustered(mddev) &&
6695                 !(info->state & ((1 << MD_DISK_CLUSTER_ADD) | (1 << MD_DISK_CANDIDATE)))) {
6696                 pr_warn("%s: Cannot add to clustered mddev.\n",
6697                         mdname(mddev));
6698                 return -EINVAL;
6699         }
6700
6701         if (info->major != MAJOR(dev) || info->minor != MINOR(dev))
6702                 return -EOVERFLOW;
6703
6704         if (!mddev->raid_disks) {
6705                 int err;
6706                 /* expecting a device which has a superblock */
6707                 rdev = md_import_device(dev, mddev->major_version, mddev->minor_version);
6708                 if (IS_ERR(rdev)) {
6709                         pr_warn("md: md_import_device returned %ld\n",
6710                                 PTR_ERR(rdev));
6711                         return PTR_ERR(rdev);
6712                 }
6713                 if (!list_empty(&mddev->disks)) {
6714                         struct md_rdev *rdev0
6715                                 = list_entry(mddev->disks.next,
6716                                              struct md_rdev, same_set);
6717                         err = super_types[mddev->major_version]
6718                                 .load_super(rdev, rdev0, mddev->minor_version);
6719                         if (err < 0) {
6720                                 pr_warn("md: %pg has different UUID to %pg\n",
6721                                         rdev->bdev,
6722                                         rdev0->bdev);
6723                                 export_rdev(rdev, mddev);
6724                                 return -EINVAL;
6725                         }
6726                 }
6727                 err = bind_rdev_to_array(rdev, mddev);
6728                 if (err)
6729                         export_rdev(rdev, mddev);
6730                 return err;
6731         }
6732
6733         /*
6734          * md_add_new_disk can be used once the array is assembled
6735          * to add "hot spares".  They must already have a superblock
6736          * written.
6737          */
6738         if (mddev->pers) {
6739                 int err;
6740                 if (!mddev->pers->hot_add_disk) {
6741                         pr_warn("%s: personality does not support diskops!\n",
6742                                 mdname(mddev));
6743                         return -EINVAL;
6744                 }
6745                 if (mddev->persistent)
6746                         rdev = md_import_device(dev, mddev->major_version,
6747                                                 mddev->minor_version);
6748                 else
6749                         rdev = md_import_device(dev, -1, -1);
6750                 if (IS_ERR(rdev)) {
6751                         pr_warn("md: md_import_device returned %ld\n",
6752                                 PTR_ERR(rdev));
6753                         return PTR_ERR(rdev);
6754                 }
6755                 /* set saved_raid_disk if appropriate */
6756                 if (!mddev->persistent) {
6757                         if (info->state & (1<<MD_DISK_SYNC)  &&
6758                             info->raid_disk < mddev->raid_disks) {
6759                                 rdev->raid_disk = info->raid_disk;
6760                                 clear_bit(Bitmap_sync, &rdev->flags);
6761                         } else
6762                                 rdev->raid_disk = -1;
6763                         rdev->saved_raid_disk = rdev->raid_disk;
6764                 } else
6765                         super_types[mddev->major_version].
6766                                 validate_super(mddev, rdev);
6767                 if ((info->state & (1<<MD_DISK_SYNC)) &&
6768                      rdev->raid_disk != info->raid_disk) {
6769                         /* This was a hot-add request, but events doesn't
6770                          * match, so reject it.
6771                          */
6772                         export_rdev(rdev, mddev);
6773                         return -EINVAL;
6774                 }
6775
6776                 clear_bit(In_sync, &rdev->flags); /* just to be sure */
6777                 if (info->state & (1<<MD_DISK_WRITEMOSTLY))
6778                         set_bit(WriteMostly, &rdev->flags);
6779                 else
6780                         clear_bit(WriteMostly, &rdev->flags);
6781                 if (info->state & (1<<MD_DISK_FAILFAST))
6782                         set_bit(FailFast, &rdev->flags);
6783                 else
6784                         clear_bit(FailFast, &rdev->flags);
6785
6786                 if (info->state & (1<<MD_DISK_JOURNAL)) {
6787                         struct md_rdev *rdev2;
6788                         bool has_journal = false;
6789
6790                         /* make sure no existing journal disk */
6791                         rdev_for_each(rdev2, mddev) {
6792                                 if (test_bit(Journal, &rdev2->flags)) {
6793                                         has_journal = true;
6794                                         break;
6795                                 }
6796                         }
6797                         if (has_journal || mddev->bitmap) {
6798                                 export_rdev(rdev, mddev);
6799                                 return -EBUSY;
6800                         }
6801                         set_bit(Journal, &rdev->flags);
6802                 }
6803                 /*
6804                  * check whether the device shows up in other nodes
6805                  */
6806                 if (mddev_is_clustered(mddev)) {
6807                         if (info->state & (1 << MD_DISK_CANDIDATE))
6808                                 set_bit(Candidate, &rdev->flags);
6809                         else if (info->state & (1 << MD_DISK_CLUSTER_ADD)) {
6810                                 /* --add initiated by this node */
6811                                 err = md_cluster_ops->add_new_disk(mddev, rdev);
6812                                 if (err) {
6813                                         export_rdev(rdev, mddev);
6814                                         return err;
6815                                 }
6816                         }
6817                 }
6818
6819                 rdev->raid_disk = -1;
6820                 err = bind_rdev_to_array(rdev, mddev);
6821
6822                 if (err)
6823                         export_rdev(rdev, mddev);
6824
6825                 if (mddev_is_clustered(mddev)) {
6826                         if (info->state & (1 << MD_DISK_CANDIDATE)) {
6827                                 if (!err) {
6828                                         err = md_cluster_ops->new_disk_ack(mddev,
6829                                                 err == 0);
6830                                         if (err)
6831                                                 md_kick_rdev_from_array(rdev);
6832                                 }
6833                         } else {
6834                                 if (err)
6835                                         md_cluster_ops->add_new_disk_cancel(mddev);
6836                                 else
6837                                         err = add_bound_rdev(rdev);
6838                         }
6839
6840                 } else if (!err)
6841                         err = add_bound_rdev(rdev);
6842
6843                 return err;
6844         }
6845
6846         /* otherwise, md_add_new_disk is only allowed
6847          * for major_version==0 superblocks
6848          */
6849         if (mddev->major_version != 0) {
6850                 pr_warn("%s: ADD_NEW_DISK not supported\n", mdname(mddev));
6851                 return -EINVAL;
6852         }
6853
6854         if (!(info->state & (1<<MD_DISK_FAULTY))) {
6855                 int err;
6856                 rdev = md_import_device(dev, -1, 0);
6857                 if (IS_ERR(rdev)) {
6858                         pr_warn("md: error, md_import_device() returned %ld\n",
6859                                 PTR_ERR(rdev));
6860                         return PTR_ERR(rdev);
6861                 }
6862                 rdev->desc_nr = info->number;
6863                 if (info->raid_disk < mddev->raid_disks)
6864                         rdev->raid_disk = info->raid_disk;
6865                 else
6866                         rdev->raid_disk = -1;
6867
6868                 if (rdev->raid_disk < mddev->raid_disks)
6869                         if (info->state & (1<<MD_DISK_SYNC))
6870                                 set_bit(In_sync, &rdev->flags);
6871
6872                 if (info->state & (1<<MD_DISK_WRITEMOSTLY))
6873                         set_bit(WriteMostly, &rdev->flags);
6874                 if (info->state & (1<<MD_DISK_FAILFAST))
6875                         set_bit(FailFast, &rdev->flags);
6876
6877                 if (!mddev->persistent) {
6878                         pr_debug("md: nonpersistent superblock ...\n");
6879                         rdev->sb_start = bdev_nr_sectors(rdev->bdev);
6880                 } else
6881                         rdev->sb_start = calc_dev_sboffset(rdev);
6882                 rdev->sectors = rdev->sb_start;
6883
6884                 err = bind_rdev_to_array(rdev, mddev);
6885                 if (err) {
6886                         export_rdev(rdev, mddev);
6887                         return err;
6888                 }
6889         }
6890
6891         return 0;
6892 }
6893
6894 static int hot_remove_disk(struct mddev *mddev, dev_t dev)
6895 {
6896         struct md_rdev *rdev;
6897
6898         if (!mddev->pers)
6899                 return -ENODEV;
6900
6901         rdev = find_rdev(mddev, dev);
6902         if (!rdev)
6903                 return -ENXIO;
6904
6905         if (rdev->raid_disk < 0)
6906                 goto kick_rdev;
6907
6908         clear_bit(Blocked, &rdev->flags);
6909         remove_and_add_spares(mddev, rdev);
6910
6911         if (rdev->raid_disk >= 0)
6912                 goto busy;
6913
6914 kick_rdev:
6915         if (mddev_is_clustered(mddev)) {
6916                 if (md_cluster_ops->remove_disk(mddev, rdev))
6917                         goto busy;
6918         }
6919
6920         md_kick_rdev_from_array(rdev);
6921         set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
6922         if (mddev->thread)
6923                 md_wakeup_thread(mddev->thread);
6924         else
6925                 md_update_sb(mddev, 1);
6926         md_new_event();
6927
6928         return 0;
6929 busy:
6930         pr_debug("md: cannot remove active disk %pg from %s ...\n",
6931                  rdev->bdev, mdname(mddev));
6932         return -EBUSY;
6933 }
6934
6935 static int hot_add_disk(struct mddev *mddev, dev_t dev)
6936 {
6937         int err;
6938         struct md_rdev *rdev;
6939
6940         if (!mddev->pers)
6941                 return -ENODEV;
6942
6943         if (mddev->major_version != 0) {
6944                 pr_warn("%s: HOT_ADD may only be used with version-0 superblocks.\n",
6945                         mdname(mddev));
6946                 return -EINVAL;
6947         }
6948         if (!mddev->pers->hot_add_disk) {
6949                 pr_warn("%s: personality does not support diskops!\n",
6950                         mdname(mddev));
6951                 return -EINVAL;
6952         }
6953
6954         rdev = md_import_device(dev, -1, 0);
6955         if (IS_ERR(rdev)) {
6956                 pr_warn("md: error, md_import_device() returned %ld\n",
6957                         PTR_ERR(rdev));
6958                 return -EINVAL;
6959         }
6960
6961         if (mddev->persistent)
6962                 rdev->sb_start = calc_dev_sboffset(rdev);
6963         else
6964                 rdev->sb_start = bdev_nr_sectors(rdev->bdev);
6965
6966         rdev->sectors = rdev->sb_start;
6967
6968         if (test_bit(Faulty, &rdev->flags)) {
6969                 pr_warn("md: can not hot-add faulty %pg disk to %s!\n",
6970                         rdev->bdev, mdname(mddev));
6971                 err = -EINVAL;
6972                 goto abort_export;
6973         }
6974
6975         clear_bit(In_sync, &rdev->flags);
6976         rdev->desc_nr = -1;
6977         rdev->saved_raid_disk = -1;
6978         err = bind_rdev_to_array(rdev, mddev);
6979         if (err)
6980                 goto abort_export;
6981
6982         /*
6983          * The rest should better be atomic, we can have disk failures
6984          * noticed in interrupt contexts ...
6985          */
6986
6987         rdev->raid_disk = -1;
6988
6989         set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
6990         if (!mddev->thread)
6991                 md_update_sb(mddev, 1);
6992         /*
6993          * If the new disk does not support REQ_NOWAIT,
6994          * disable on the whole MD.
6995          */
6996         if (!bdev_nowait(rdev->bdev)) {
6997                 pr_info("%s: Disabling nowait because %pg does not support nowait\n",
6998                         mdname(mddev), rdev->bdev);
6999                 blk_queue_flag_clear(QUEUE_FLAG_NOWAIT, mddev->queue);
7000         }
7001         /*
7002          * Kick recovery, maybe this spare has to be added to the
7003          * array immediately.
7004          */
7005         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
7006         md_wakeup_thread(mddev->thread);
7007         md_new_event();
7008         return 0;
7009
7010 abort_export:
7011         export_rdev(rdev, mddev);
7012         return err;
7013 }
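/*
 * Sketch of the legacy hot-plug interface implemented above (ioctl names from
 * md_u.h, device numbers are examples only):
 *
 *     ioctl(fd, HOT_REMOVE_DISK, makedev(8, 33));   - detach /dev/sdc1
 *     ioctl(fd, HOT_ADD_DISK,    makedev(8, 33));   - re-add it, v0.90 arrays only
 *
 * Arrays with newer metadata add devices through md_add_new_disk() instead,
 * as the major_version check above enforces.
 */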
7014
7015 static int set_bitmap_file(struct mddev *mddev, int fd)
7016 {
7017         int err = 0;
7018
7019         if (mddev->pers) {
7020                 if (!mddev->pers->quiesce || !mddev->thread)
7021                         return -EBUSY;
7022                 if (mddev->recovery || mddev->sync_thread)
7023                         return -EBUSY;
7024                 /* we should be able to change the bitmap.. */
7025         }
7026
7027         if (fd >= 0) {
7028                 struct inode *inode;
7029                 struct file *f;
7030
7031                 if (mddev->bitmap || mddev->bitmap_info.file)
7032                         return -EEXIST; /* cannot add when bitmap is present */
7033                 f = fget(fd);
7034
7035                 if (f == NULL) {
7036                         pr_warn("%s: error: failed to get bitmap file\n",
7037                                 mdname(mddev));
7038                         return -EBADF;
7039                 }
7040
7041                 inode = f->f_mapping->host;
7042                 if (!S_ISREG(inode->i_mode)) {
7043                         pr_warn("%s: error: bitmap file must be a regular file\n",
7044                                 mdname(mddev));
7045                         err = -EBADF;
7046                 } else if (!(f->f_mode & FMODE_WRITE)) {
7047                         pr_warn("%s: error: bitmap file must be open for write\n",
7048                                 mdname(mddev));
7049                         err = -EBADF;
7050                 } else if (atomic_read(&inode->i_writecount) != 1) {
7051                         pr_warn("%s: error: bitmap file is already in use\n",
7052                                 mdname(mddev));
7053                         err = -EBUSY;
7054                 }
7055                 if (err) {
7056                         fput(f);
7057                         return err;
7058                 }
7059                 mddev->bitmap_info.file = f;
7060                 mddev->bitmap_info.offset = 0; /* file overrides offset */
7061         } else if (mddev->bitmap == NULL)
7062                 return -ENOENT; /* cannot remove what isn't there */
7063         err = 0;
7064         if (mddev->pers) {
7065                 if (fd >= 0) {
7066                         struct bitmap *bitmap;
7067
7068                         bitmap = md_bitmap_create(mddev, -1);
7069                         mddev_suspend(mddev);
7070                         if (!IS_ERR(bitmap)) {
7071                                 mddev->bitmap = bitmap;
7072                                 err = md_bitmap_load(mddev);
7073                         } else
7074                                 err = PTR_ERR(bitmap);
7075                         if (err) {
7076                                 md_bitmap_destroy(mddev);
7077                                 fd = -1;
7078                         }
7079                         mddev_resume(mddev);
7080                 } else if (fd < 0) {
7081                         mddev_suspend(mddev);
7082                         md_bitmap_destroy(mddev);
7083                         mddev_resume(mddev);
7084                 }
7085         }
7086         if (fd < 0) {
7087                 struct file *f = mddev->bitmap_info.file;
7088                 if (f) {
7089                         spin_lock(&mddev->lock);
7090                         mddev->bitmap_info.file = NULL;
7091                         spin_unlock(&mddev->lock);
7092                         fput(f);
7093                 }
7094         }
7095
7096         return err;
7097 }
7098
7099 /*
7100  * md_set_array_info is used in two different ways.
7101  * The original usage is when creating a new array.
7102  * In this usage, raid_disks is > 0 and it together with
7103  *  level, size, not_persistent, layout and chunksize determine the
7104  *  shape of the array.
7105  *  This will always create an array with a type-0.90.0 superblock.
7106  * The newer usage is when assembling an array.
7107  *  In this case raid_disks will be 0, and the major_version field is
7108  *  used to determine which style super-blocks are to be found on the devices.
7109  *  The minor and patch _version numbers are also kept in case the
7110  *  super_block handler wishes to interpret them.
7111  */
7112 int md_set_array_info(struct mddev *mddev, struct mdu_array_info_s *info)
7113 {
7114         if (info->raid_disks == 0) {
7115                 /* just setting version number for superblock loading */
7116                 if (info->major_version < 0 ||
7117                     info->major_version >= ARRAY_SIZE(super_types) ||
7118                     super_types[info->major_version].name == NULL) {
7119                         /* maybe try to auto-load a module? */
7120                         pr_warn("md: superblock version %d not known\n",
7121                                 info->major_version);
7122                         return -EINVAL;
7123                 }
7124                 mddev->major_version = info->major_version;
7125                 mddev->minor_version = info->minor_version;
7126                 mddev->patch_version = info->patch_version;
7127                 mddev->persistent = !info->not_persistent;
7128                 /* ensure mddev_put doesn't delete this now that there
7129                  * is some minimal configuration.
7130                  */
7131                 mddev->ctime         = ktime_get_real_seconds();
7132                 return 0;
7133         }
7134         mddev->major_version = MD_MAJOR_VERSION;
7135         mddev->minor_version = MD_MINOR_VERSION;
7136         mddev->patch_version = MD_PATCHLEVEL_VERSION;
7137         mddev->ctime         = ktime_get_real_seconds();
7138
7139         mddev->level         = info->level;
7140         mddev->clevel[0]     = 0;
7141         mddev->dev_sectors   = 2 * (sector_t)info->size;
7142         mddev->raid_disks    = info->raid_disks;
7143         /* don't set md_minor, it is determined by which /dev/md* was
7144          * opened
7145          */
7146         if (info->state & (1<<MD_SB_CLEAN))
7147                 mddev->recovery_cp = MaxSector;
7148         else
7149                 mddev->recovery_cp = 0;
7150         mddev->persistent    = !info->not_persistent;
7151         mddev->external      = 0;
7152
7153         mddev->layout        = info->layout;
7154         if (mddev->level == 0)
7155                 /* Cannot trust RAID0 layout info here */
7156                 mddev->layout = -1;
7157         mddev->chunk_sectors = info->chunk_size >> 9;
7158
7159         if (mddev->persistent) {
7160                 mddev->max_disks = MD_SB_DISKS;
7161                 mddev->flags = 0;
7162                 mddev->sb_flags = 0;
7163         }
7164         set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
7165
7166         mddev->bitmap_info.default_offset = MD_SB_BYTES >> 9;
7167         mddev->bitmap_info.default_space = 64*2 - (MD_SB_BYTES >> 9);
7168         mddev->bitmap_info.offset = 0;
7169
7170         mddev->reshape_position = MaxSector;
7171
7172         /*
7173          * Generate a 128 bit UUID
7174          */
7175         get_random_bytes(mddev->uuid, 16);
7176
7177         mddev->new_level = mddev->level;
7178         mddev->new_chunk_sectors = mddev->chunk_sectors;
7179         mddev->new_layout = mddev->layout;
7180         mddev->delta_disks = 0;
7181         mddev->reshape_backwards = 0;
7182
7183         return 0;
7184 }
7185
7186 void md_set_array_sectors(struct mddev *mddev, sector_t array_sectors)
7187 {
7188         lockdep_assert_held(&mddev->reconfig_mutex);
7189
7190         if (mddev->external_size)
7191                 return;
7192
7193         mddev->array_sectors = array_sectors;
7194 }
7195 EXPORT_SYMBOL(md_set_array_sectors);
7196
7197 static int update_size(struct mddev *mddev, sector_t num_sectors)
7198 {
7199         struct md_rdev *rdev;
7200         int rv;
7201         int fit = (num_sectors == 0);
7202         sector_t old_dev_sectors = mddev->dev_sectors;
7203
7204         if (mddev->pers->resize == NULL)
7205                 return -EINVAL;
7206         /* The "num_sectors" is the number of sectors of each device that
7207          * is used.  This can only make sense for arrays with redundancy.
7208          * linear and raid0 always use whatever space is available. We can only
7209          * consider changing this number if no resync or reconstruction is
7210          * happening, and if the new size is acceptable. It must fit before the
7211          * sb_start or, if that is <data_offset, it must fit before the size
7212          * of each device.  If num_sectors is zero, we find the largest size
7213          * that fits.
7214          */
7215         if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
7216             mddev->sync_thread)
7217                 return -EBUSY;
7218         if (!md_is_rdwr(mddev))
7219                 return -EROFS;
7220
7221         rdev_for_each(rdev, mddev) {
7222                 sector_t avail = rdev->sectors;
7223
7224                 if (fit && (num_sectors == 0 || num_sectors > avail))
7225                         num_sectors = avail;
7226                 if (avail < num_sectors)
7227                         return -ENOSPC;
7228         }
7229         rv = mddev->pers->resize(mddev, num_sectors);
7230         if (!rv) {
7231                 if (mddev_is_clustered(mddev))
7232                         md_cluster_ops->update_size(mddev, old_dev_sectors);
7233                 else if (mddev->queue) {
7234                         set_capacity_and_notify(mddev->gendisk,
7235                                                 mddev->array_sectors);
7236                 }
7237         }
7238         return rv;
7239 }
7240
7241 static int update_raid_disks(struct mddev *mddev, int raid_disks)
7242 {
7243         int rv;
7244         struct md_rdev *rdev;
7245         /* change the number of raid disks */
7246         if (mddev->pers->check_reshape == NULL)
7247                 return -EINVAL;
7248         if (!md_is_rdwr(mddev))
7249                 return -EROFS;
7250         if (raid_disks <= 0 ||
7251             (mddev->max_disks && raid_disks >= mddev->max_disks))
7252                 return -EINVAL;
7253         if (mddev->sync_thread ||
7254             test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
7255             test_bit(MD_RESYNCING_REMOTE, &mddev->recovery) ||
7256             mddev->reshape_position != MaxSector)
7257                 return -EBUSY;
7258
7259         rdev_for_each(rdev, mddev) {
7260                 if (mddev->raid_disks < raid_disks &&
7261                     rdev->data_offset < rdev->new_data_offset)
7262                         return -EINVAL;
7263                 if (mddev->raid_disks > raid_disks &&
7264                     rdev->data_offset > rdev->new_data_offset)
7265                         return -EINVAL;
7266         }
7267
7268         mddev->delta_disks = raid_disks - mddev->raid_disks;
7269         if (mddev->delta_disks < 0)
7270                 mddev->reshape_backwards = 1;
7271         else if (mddev->delta_disks > 0)
7272                 mddev->reshape_backwards = 0;
7273
7274         rv = mddev->pers->check_reshape(mddev);
7275         if (rv < 0) {
7276                 mddev->delta_disks = 0;
7277                 mddev->reshape_backwards = 0;
7278         }
7279         return rv;
7280 }
7281
7282 /*
7283  * update_array_info is used to change the configuration of an
7284  * on-line array.
7285  * The version, ctime, level, size, raid_disks, not_persistent, layout and chunk_size
7286  * fields in the info are checked against the array.
7287  * Any differences that cannot be handled will cause an error.
7288  * Normally, only one change can be managed at a time.
7289  */
7290 static int update_array_info(struct mddev *mddev, mdu_array_info_t *info)
7291 {
7292         int rv = 0;
7293         int cnt = 0;
7294         int state = 0;
7295
7296         /* calculate expected state, ignoring low bits */
7297         if (mddev->bitmap && mddev->bitmap_info.offset)
7298                 state |= (1 << MD_SB_BITMAP_PRESENT);
7299
7300         if (mddev->major_version != info->major_version ||
7301             mddev->minor_version != info->minor_version ||
7302 /*          mddev->patch_version != info->patch_version || */
7303             mddev->ctime         != info->ctime         ||
7304             mddev->level         != info->level         ||
7305 /*          mddev->layout        != info->layout        || */
7306             mddev->persistent    != !info->not_persistent ||
7307             mddev->chunk_sectors != info->chunk_size >> 9 ||
7308             /* ignore bottom 8 bits of state, and allow SB_BITMAP_PRESENT to change */
7309             ((state^info->state) & 0xfffffe00)
7310                 )
7311                 return -EINVAL;
7312         /* Check there is only one change */
7313         if (info->size >= 0 && mddev->dev_sectors / 2 != info->size)
7314                 cnt++;
7315         if (mddev->raid_disks != info->raid_disks)
7316                 cnt++;
7317         if (mddev->layout != info->layout)
7318                 cnt++;
7319         if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT))
7320                 cnt++;
7321         if (cnt == 0)
7322                 return 0;
7323         if (cnt > 1)
7324                 return -EINVAL;
7325
7326         if (mddev->layout != info->layout) {
7327                 /* Change layout
7328                  * we don't need to do anything at the md level, the
7329                  * personality will take care of it all.
7330                  */
7331                 if (mddev->pers->check_reshape == NULL)
7332                         return -EINVAL;
7333                 else {
7334                         mddev->new_layout = info->layout;
7335                         rv = mddev->pers->check_reshape(mddev);
7336                         if (rv)
7337                                 mddev->new_layout = mddev->layout;
7338                         return rv;
7339                 }
7340         }
7341         if (info->size >= 0 && mddev->dev_sectors / 2 != info->size)
7342                 rv = update_size(mddev, (sector_t)info->size * 2);
7343
7344         if (mddev->raid_disks    != info->raid_disks)
7345                 rv = update_raid_disks(mddev, info->raid_disks);
7346
7347         if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT)) {
7348                 if (mddev->pers->quiesce == NULL || mddev->thread == NULL) {
7349                         rv = -EINVAL;
7350                         goto err;
7351                 }
7352                 if (mddev->recovery || mddev->sync_thread) {
7353                         rv = -EBUSY;
7354                         goto err;
7355                 }
7356                 if (info->state & (1<<MD_SB_BITMAP_PRESENT)) {
7357                         struct bitmap *bitmap;
7358                         /* add the bitmap */
7359                         if (mddev->bitmap) {
7360                                 rv = -EEXIST;
7361                                 goto err;
7362                         }
7363                         if (mddev->bitmap_info.default_offset == 0) {
7364                                 rv = -EINVAL;
7365                                 goto err;
7366                         }
7367                         mddev->bitmap_info.offset =
7368                                 mddev->bitmap_info.default_offset;
7369                         mddev->bitmap_info.space =
7370                                 mddev->bitmap_info.default_space;
7371                         bitmap = md_bitmap_create(mddev, -1);
7372                         mddev_suspend(mddev);
7373                         if (!IS_ERR(bitmap)) {
7374                                 mddev->bitmap = bitmap;
7375                                 rv = md_bitmap_load(mddev);
7376                         } else
7377                                 rv = PTR_ERR(bitmap);
7378                         if (rv)
7379                                 md_bitmap_destroy(mddev);
7380                         mddev_resume(mddev);
7381                 } else {
7382                         /* remove the bitmap */
7383                         if (!mddev->bitmap) {
7384                                 rv = -ENOENT;
7385                                 goto err;
7386                         }
7387                         if (mddev->bitmap->storage.file) {
7388                                 rv = -EINVAL;
7389                                 goto err;
7390                         }
7391                         if (mddev->bitmap_info.nodes) {
7392                                 /* hold PW lock on all the bitmaps */
7393                                 if (md_cluster_ops->lock_all_bitmaps(mddev) <= 0) {
7394                                         pr_warn("md: can't change bitmap to none since the array is in use by more than one node\n");
7395                                         rv = -EPERM;
7396                                         md_cluster_ops->unlock_all_bitmaps(mddev);
7397                                         goto err;
7398                                 }
7399
7400                                 mddev->bitmap_info.nodes = 0;
7401                                 md_cluster_ops->leave(mddev);
7402                                 module_put(md_cluster_mod);
7403                                 mddev->safemode_delay = DEFAULT_SAFEMODE_DELAY;
7404                         }
7405                         mddev_suspend(mddev);
7406                         md_bitmap_destroy(mddev);
7407                         mddev_resume(mddev);
7408                         mddev->bitmap_info.offset = 0;
7409                 }
7410         }
7411         md_update_sb(mddev, 1);
7412         return rv;
7413 err:
7414         return rv;
7415 }
7416
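/*
 * SET_DISK_FAULTY: look up the member device under RCU and hand it to
 * md_error().  Returns -EBUSY if failing the device left the whole array
 * marked MD_BROKEN.
 */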
7417 static int set_disk_faulty(struct mddev *mddev, dev_t dev)
7418 {
7419         struct md_rdev *rdev;
7420         int err = 0;
7421
7422         if (mddev->pers == NULL)
7423                 return -ENODEV;
7424
7425         rcu_read_lock();
7426         rdev = md_find_rdev_rcu(mddev, dev);
7427         if (!rdev)
7428                 err = -ENODEV;
7429         else {
7430                 md_error(mddev, rdev);
7431                 if (test_bit(MD_BROKEN, &mddev->flags))
7432                         err = -EBUSY;
7433         }
7434         rcu_read_unlock();
7435         return err;
7436 }
7437
7438 /*
7439  * We have a problem here : there is no easy way to give a CHS
7440  * virtual geometry. We currently pretend that we have a 2 heads
7441  * 4 sectors (with a BIG number of cylinders...). This drives
7442  * dosfs just mad... ;-)
7443  */
7444 static int md_getgeo(struct block_device *bdev, struct hd_geometry *geo)
7445 {
7446         struct mddev *mddev = bdev->bd_disk->private_data;
7447
7448         geo->heads = 2;
7449         geo->sectors = 4;
7450         geo->cylinders = mddev->array_sectors / 8;
7451         return 0;
7452 }
7453
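/*
 * Whitelist of ioctl commands the md driver implements; anything else is
 * rejected with -ENOTTY before any locking is attempted.
 */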
7454 static inline bool md_ioctl_valid(unsigned int cmd)
7455 {
7456         switch (cmd) {
7457         case ADD_NEW_DISK:
7458         case GET_ARRAY_INFO:
7459         case GET_BITMAP_FILE:
7460         case GET_DISK_INFO:
7461         case HOT_ADD_DISK:
7462         case HOT_REMOVE_DISK:
7463         case RAID_VERSION:
7464         case RESTART_ARRAY_RW:
7465         case RUN_ARRAY:
7466         case SET_ARRAY_INFO:
7467         case SET_BITMAP_FILE:
7468         case SET_DISK_FAULTY:
7469         case STOP_ARRAY:
7470         case STOP_ARRAY_RO:
7471         case CLUSTERED_DISK_NACK:
7472                 return true;
7473         default:
7474                 return false;
7475         }
7476 }
7477
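/*
 * SET_ARRAY_INFO: for a running array this becomes update_array_info();
 * for an array that has not been started yet it records the requested
 * shape (or just the superblock version) via md_set_array_info().
 */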
7478 static int __md_set_array_info(struct mddev *mddev, void __user *argp)
7479 {
7480         mdu_array_info_t info;
7481         int err;
7482
7483         if (!argp)
7484                 memset(&info, 0, sizeof(info));
7485         else if (copy_from_user(&info, argp, sizeof(info)))
7486                 return -EFAULT;
7487
7488         if (mddev->pers) {
7489                 err = update_array_info(mddev, &info);
7490                 if (err)
7491                         pr_warn("md: couldn't update array info. %d\n", err);
7492                 return err;
7493         }
7494
7495         if (!list_empty(&mddev->disks)) {
7496                 pr_warn("md: array %s already has disks!\n", mdname(mddev));
7497                 return -EBUSY;
7498         }
7499
7500         if (mddev->raid_disks) {
7501                 pr_warn("md: array %s already initialised!\n", mdname(mddev));
7502                 return -EBUSY;
7503         }
7504
7505         err = md_set_array_info(mddev, &info);
7506         if (err)
7507                 pr_warn("md: couldn't set array info. %d\n", err);
7508
7509         return err;
7510 }
7511
7512 static int md_ioctl(struct block_device *bdev, blk_mode_t mode,
7513                         unsigned int cmd, unsigned long arg)
7514 {
7515         int err = 0;
7516         void __user *argp = (void __user *)arg;
7517         struct mddev *mddev = NULL;
7518         bool did_set_md_closing = false;
7519
7520         if (!md_ioctl_valid(cmd))
7521                 return -ENOTTY;
7522
7523         switch (cmd) {
7524         case RAID_VERSION:
7525         case GET_ARRAY_INFO:
7526         case GET_DISK_INFO:
7527                 break;
7528         default:
7529                 if (!capable(CAP_SYS_ADMIN))
7530                         return -EACCES;
7531         }
7532
7533         /*
7534          * Commands dealing with the RAID driver but not any
7535          * particular array:
7536          */
7537         switch (cmd) {
7538         case RAID_VERSION:
7539                 err = get_version(argp);
7540                 goto out;
7541         default:;
7542         }
7543
7544         /*
7545          * Commands creating/starting a new array:
7546          */
7547
7548         mddev = bdev->bd_disk->private_data;
7549
7550         if (!mddev) {
7551                 BUG();
7552                 goto out;
7553         }
7554
7555         /* Some actions do not require the mutex */
7556         switch (cmd) {
7557         case GET_ARRAY_INFO:
7558                 if (!mddev->raid_disks && !mddev->external)
7559                         err = -ENODEV;
7560                 else
7561                         err = get_array_info(mddev, argp);
7562                 goto out;
7563
7564         case GET_DISK_INFO:
7565                 if (!mddev->raid_disks && !mddev->external)
7566                         err = -ENODEV;
7567                 else
7568                         err = get_disk_info(mddev, argp);
7569                 goto out;
7570
7571         case SET_DISK_FAULTY:
7572                 err = set_disk_faulty(mddev, new_decode_dev(arg));
7573                 goto out;
7574
7575         case GET_BITMAP_FILE:
7576                 err = get_bitmap_file(mddev, argp);
7577                 goto out;
7578
7579         }
7580
7581         if (cmd == HOT_REMOVE_DISK)
7582                 /* need to ensure recovery thread has run */
7583                 wait_event_interruptible_timeout(mddev->sb_wait,
7584                                                  !test_bit(MD_RECOVERY_NEEDED,
7585                                                            &mddev->recovery),
7586                                                  msecs_to_jiffies(5000));
7587         if (cmd == STOP_ARRAY || cmd == STOP_ARRAY_RO) {
7588                 /* Need to flush page cache, and ensure no-one else opens
7589                  * and writes
7590                  */
7591                 mutex_lock(&mddev->open_mutex);
7592                 if (mddev->pers && atomic_read(&mddev->openers) > 1) {
7593                         mutex_unlock(&mddev->open_mutex);
7594                         err = -EBUSY;
7595                         goto out;
7596                 }
7597                 if (test_and_set_bit(MD_CLOSING, &mddev->flags)) {
7598                         mutex_unlock(&mddev->open_mutex);
7599                         err = -EBUSY;
7600                         goto out;
7601                 }
7602                 did_set_md_closing = true;
7603                 mutex_unlock(&mddev->open_mutex);
7604                 sync_blockdev(bdev);
7605         }
7606         err = mddev_lock(mddev);
7607         if (err) {
7608                 pr_debug("md: ioctl lock interrupted, reason %d, cmd %d\n",
7609                          err, cmd);
7610                 goto out;
7611         }
7612
7613         if (cmd == SET_ARRAY_INFO) {
7614                 err = __md_set_array_info(mddev, argp);
7615                 goto unlock;
7616         }
7617
7618         /*
7619          * Commands querying/configuring an existing array:
7620          */
7621         /* if we are not initialised yet, only ADD_NEW_DISK, STOP_ARRAY,
7622          * RUN_ARRAY, and GET_ and SET_BITMAP_FILE are allowed */
7623         if ((!mddev->raid_disks && !mddev->external)
7624             && cmd != ADD_NEW_DISK && cmd != STOP_ARRAY
7625             && cmd != RUN_ARRAY && cmd != SET_BITMAP_FILE
7626             && cmd != GET_BITMAP_FILE) {
7627                 err = -ENODEV;
7628                 goto unlock;
7629         }
7630
7631         /*
7632          * Commands even a read-only array can execute:
7633          */
7634         switch (cmd) {
7635         case RESTART_ARRAY_RW:
7636                 err = restart_array(mddev);
7637                 goto unlock;
7638
7639         case STOP_ARRAY:
7640                 err = do_md_stop(mddev, 0, bdev);
7641                 goto unlock;
7642
7643         case STOP_ARRAY_RO:
7644                 err = md_set_readonly(mddev, bdev);
7645                 goto unlock;
7646
7647         case HOT_REMOVE_DISK:
7648                 err = hot_remove_disk(mddev, new_decode_dev(arg));
7649                 goto unlock;
7650
7651         case ADD_NEW_DISK:
7652                 /* We can support ADD_NEW_DISK on read-only arrays
7653                  * only if we are re-adding a preexisting device.
7654                  * So require mddev->pers and MD_DISK_SYNC.
7655                  */
7656                 if (mddev->pers) {
7657                         mdu_disk_info_t info;
7658                         if (copy_from_user(&info, argp, sizeof(info)))
7659                                 err = -EFAULT;
7660                         else if (!(info.state & (1<<MD_DISK_SYNC)))
7661                                 /* Need to clear read-only for this */
7662                                 break;
7663                         else
7664                                 err = md_add_new_disk(mddev, &info);
7665                         goto unlock;
7666                 }
7667                 break;
7668         }
7669
7670         /*
7671          * The remaining ioctls are changing the state of the
7672          * superblock, so we do not allow them on read-only arrays.
7673          */
7674         if (!md_is_rdwr(mddev) && mddev->pers) {
7675                 if (mddev->ro != MD_AUTO_READ) {
7676                         err = -EROFS;
7677                         goto unlock;
7678                 }
7679                 mddev->ro = MD_RDWR;
7680                 sysfs_notify_dirent_safe(mddev->sysfs_state);
7681                 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
7682                 /* mddev_unlock will wake thread */
7683                 /* If a device failed while we were read-only, we
7684                  * need to make sure the metadata is updated now.
7685                  */
7686                 if (test_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags)) {
7687                         mddev_unlock(mddev);
7688                         wait_event(mddev->sb_wait,
7689                                    !test_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags) &&
7690                                    !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags));
7691                         mddev_lock_nointr(mddev);
7692                 }
7693         }
7694
7695         switch (cmd) {
7696         case ADD_NEW_DISK:
7697         {
7698                 mdu_disk_info_t info;
7699                 if (copy_from_user(&info, argp, sizeof(info)))
7700                         err = -EFAULT;
7701                 else
7702                         err = md_add_new_disk(mddev, &info);
7703                 goto unlock;
7704         }
7705
7706         case CLUSTERED_DISK_NACK:
7707                 if (mddev_is_clustered(mddev))
7708                         md_cluster_ops->new_disk_ack(mddev, false);
7709                 else
7710                         err = -EINVAL;
7711                 goto unlock;
7712
7713         case HOT_ADD_DISK:
7714                 err = hot_add_disk(mddev, new_decode_dev(arg));
7715                 goto unlock;
7716
7717         case RUN_ARRAY:
7718                 err = do_md_run(mddev);
7719                 goto unlock;
7720
7721         case SET_BITMAP_FILE:
7722                 err = set_bitmap_file(mddev, (int)arg);
7723                 goto unlock;
7724
7725         default:
7726                 err = -EINVAL;
7727                 goto unlock;
7728         }
7729
7730 unlock:
7731         if (mddev->hold_active == UNTIL_IOCTL &&
7732             err != -EINVAL)
7733                 mddev->hold_active = 0;
7734         mddev_unlock(mddev);
7735 out:
7736         if (did_set_md_closing)
7737                 clear_bit(MD_CLOSING, &mddev->flags);
7738         return err;
7739 }
7740 #ifdef CONFIG_COMPAT
7741 static int md_compat_ioctl(struct block_device *bdev, blk_mode_t mode,
7742                     unsigned int cmd, unsigned long arg)
7743 {
7744         switch (cmd) {
7745         case HOT_REMOVE_DISK:
7746         case HOT_ADD_DISK:
7747         case SET_DISK_FAULTY:
7748         case SET_BITMAP_FILE:
7749                 /* These take an integer arg, do not convert */
7750                 break;
7751         default:
7752                 arg = (unsigned long)compat_ptr(arg);
7753                 break;
7754         }
7755
7756         return md_ioctl(bdev, mode, cmd, arg);
7757 }
7758 #endif /* CONFIG_COMPAT */
7759
7760 static int md_set_read_only(struct block_device *bdev, bool ro)
7761 {
7762         struct mddev *mddev = bdev->bd_disk->private_data;
7763         int err;
7764
7765         err = mddev_lock(mddev);
7766         if (err)
7767                 return err;
7768
7769         if (!mddev->raid_disks && !mddev->external) {
7770                 err = -ENODEV;
7771                 goto out_unlock;
7772         }
7773
7774         /*
7775          * Transitioning to read-auto need only happen for arrays that call
7776          * md_write_start and which are not ready for writes yet.
7777          */
7778         if (!ro && mddev->ro == MD_RDONLY && mddev->pers) {
7779                 err = restart_array(mddev);
7780                 if (err)
7781                         goto out_unlock;
7782                 mddev->ro = MD_AUTO_READ;
7783         }
7784
7785 out_unlock:
7786         mddev_unlock(mddev);
7787         return err;
7788 }
7789
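/*
 * Open handler: take a reference on the mddev under all_mddevs_lock, refuse
 * the open while the array is being torn down (MD_CLOSING), and count the
 * opener so STOP_ARRAY can detect other users.
 */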
7790 static int md_open(struct gendisk *disk, blk_mode_t mode)
7791 {
7792         struct mddev *mddev;
7793         int err;
7794
7795         spin_lock(&all_mddevs_lock);
7796         mddev = mddev_get(disk->private_data);
7797         spin_unlock(&all_mddevs_lock);
7798         if (!mddev)
7799                 return -ENODEV;
7800
7801         err = mutex_lock_interruptible(&mddev->open_mutex);
7802         if (err)
7803                 goto out;
7804
7805         err = -ENODEV;
7806         if (test_bit(MD_CLOSING, &mddev->flags))
7807                 goto out_unlock;
7808
7809         atomic_inc(&mddev->openers);
7810         mutex_unlock(&mddev->open_mutex);
7811
7812         disk_check_media_change(disk);
7813         return 0;
7814
7815 out_unlock:
7816         mutex_unlock(&mddev->open_mutex);
7817 out:
7818         mddev_put(mddev);
7819         return err;
7820 }
7821
7822 static void md_release(struct gendisk *disk)
7823 {
7824         struct mddev *mddev = disk->private_data;
7825
7826         BUG_ON(!mddev);
7827         atomic_dec(&mddev->openers);
7828         mddev_put(mddev);
7829 }
7830
7831 static unsigned int md_check_events(struct gendisk *disk, unsigned int clearing)
7832 {
7833         struct mddev *mddev = disk->private_data;
7834         unsigned int ret = 0;
7835
7836         if (mddev->changed)
7837                 ret = DISK_EVENT_MEDIA_CHANGE;
7838         mddev->changed = 0;
7839         return ret;
7840 }
7841
7842 static void md_free_disk(struct gendisk *disk)
7843 {
7844         struct mddev *mddev = disk->private_data;
7845
7846         percpu_ref_exit(&mddev->writes_pending);
7847         mddev_free(mddev);
7848 }
7849
7850 const struct block_device_operations md_fops =
7851 {
7852         .owner          = THIS_MODULE,
7853         .submit_bio     = md_submit_bio,
7854         .open           = md_open,
7855         .release        = md_release,
7856         .ioctl          = md_ioctl,
7857 #ifdef CONFIG_COMPAT
7858         .compat_ioctl   = md_compat_ioctl,
7859 #endif
7860         .getgeo         = md_getgeo,
7861         .check_events   = md_check_events,
7862         .set_read_only  = md_set_read_only,
7863         .free_disk      = md_free_disk,
7864 };
7865
7866 static int md_thread(void *arg)
7867 {
7868         struct md_thread *thread = arg;
7869
7870         /*
7871          * md_thread is a 'system-thread', its priority should be very
7872          * high. We avoid resource deadlocks individually in each
7873          * raid personality. (RAID5 does preallocation) We also use RR and
7874          * the very same RT priority as kswapd, thus we will never get
7875          * into a priority inversion deadlock.
7876          *
7877          * we definitely have to have equal or higher priority than
7878          * bdflush, otherwise bdflush will deadlock if there are too
7879          * many dirty RAID5 blocks.
7880          */
7881
7882         allow_signal(SIGKILL);
7883         while (!kthread_should_stop()) {
7884
7885                 /* We need to wait INTERRUPTIBLE so that
7886                  * we don't add to the load-average.
7887                  * That means we need to be sure no signals are
7888                  * pending
7889                  */
7890                 if (signal_pending(current))
7891                         flush_signals(current);
7892
7893                 wait_event_interruptible_timeout
7894                         (thread->wqueue,
7895                          test_bit(THREAD_WAKEUP, &thread->flags)
7896                          || kthread_should_stop() || kthread_should_park(),
7897                          thread->timeout);
7898
7899                 clear_bit(THREAD_WAKEUP, &thread->flags);
7900                 if (kthread_should_park())
7901                         kthread_parkme();
7902                 if (!kthread_should_stop())
7903                         thread->run(thread);
7904         }
7905
7906         return 0;
7907 }
7908
7909 static void md_wakeup_thread_directly(struct md_thread __rcu *thread)
7910 {
7911         struct md_thread *t;
7912
7913         rcu_read_lock();
7914         t = rcu_dereference(thread);
7915         if (t)
7916                 wake_up_process(t->tsk);
7917         rcu_read_unlock();
7918 }
7919
7920 void md_wakeup_thread(struct md_thread __rcu *thread)
7921 {
7922         struct md_thread *t;
7923
7924         rcu_read_lock();
7925         t = rcu_dereference(thread);
7926         if (t) {
7927                 pr_debug("md: waking up MD thread %s.\n", t->tsk->comm);
7928                 set_bit(THREAD_WAKEUP, &t->flags);
7929                 wake_up(&t->wqueue);
7930         }
7931         rcu_read_unlock();
7932 }
7933 EXPORT_SYMBOL(md_wakeup_thread);
7934
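/*
 * Create and start a per-array kernel thread named "<mdname>_<name>".
 * The thread sleeps until md_wakeup_thread() sets THREAD_WAKEUP (or the
 * optional timeout expires) and then calls run(); returns NULL on failure.
 */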
7935 struct md_thread *md_register_thread(void (*run) (struct md_thread *),
7936                 struct mddev *mddev, const char *name)
7937 {
7938         struct md_thread *thread;
7939
7940         thread = kzalloc(sizeof(struct md_thread), GFP_KERNEL);
7941         if (!thread)
7942                 return NULL;
7943
7944         init_waitqueue_head(&thread->wqueue);
7945
7946         thread->run = run;
7947         thread->mddev = mddev;
7948         thread->timeout = MAX_SCHEDULE_TIMEOUT;
7949         thread->tsk = kthread_run(md_thread, thread,
7950                                   "%s_%s",
7951                                   mdname(thread->mddev),
7952                                   name);
7953         if (IS_ERR(thread->tsk)) {
7954                 kfree(thread);
7955                 return NULL;
7956         }
7957         return thread;
7958 }
7959 EXPORT_SYMBOL(md_register_thread);
7960
7961 void md_unregister_thread(struct md_thread __rcu **threadp)
7962 {
7963         struct md_thread *thread = rcu_dereference_protected(*threadp, true);
7964
7965         if (!thread)
7966                 return;
7967
7968         rcu_assign_pointer(*threadp, NULL);
7969         synchronize_rcu();
7970
7971         pr_debug("interrupting MD-thread pid %d\n", task_pid_nr(thread->tsk));
7972         kthread_stop(thread->tsk);
7973         kfree(thread);
7974 }
7975 EXPORT_SYMBOL(md_unregister_thread);
7976
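/*
 * Report a device failure to the array's personality.  The error_handler
 * typically marks the rdev Faulty or flags the whole array MD_BROKEN;
 * unless the array is broken, recovery is scheduled and the managing
 * thread is woken.
 */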
7977 void md_error(struct mddev *mddev, struct md_rdev *rdev)
7978 {
7979         if (!rdev || test_bit(Faulty, &rdev->flags))
7980                 return;
7981
7982         if (!mddev->pers || !mddev->pers->error_handler)
7983                 return;
7984         mddev->pers->error_handler(mddev, rdev);
7985
7986         if (mddev->pers->level == 0 || mddev->pers->level == LEVEL_LINEAR)
7987                 return;
7988
7989         if (mddev->degraded && !test_bit(MD_BROKEN, &mddev->flags))
7990                 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
7991         sysfs_notify_dirent_safe(rdev->sysfs_state);
7992         set_bit(MD_RECOVERY_INTR, &mddev->recovery);
7993         if (!test_bit(MD_BROKEN, &mddev->flags)) {
7994                 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
7995                 md_wakeup_thread(mddev->thread);
7996         }
7997         if (mddev->event_work.func)
7998                 queue_work(md_misc_wq, &mddev->event_work);
7999         md_new_event();
8000 }
8001 EXPORT_SYMBOL(md_error);
8002
8003 /* seq_file implementation /proc/mdstat */
8004
8005 static void status_unused(struct seq_file *seq)
8006 {
8007         int i = 0;
8008         struct md_rdev *rdev;
8009
8010         seq_printf(seq, "unused devices: ");
8011
8012         list_for_each_entry(rdev, &pending_raid_disks, same_set) {
8013                 i++;
8014                 seq_printf(seq, "%pg ", rdev->bdev);
8015         }
8016         if (!i)
8017                 seq_printf(seq, "<none>");
8018
8019         seq_printf(seq, "\n");
8020 }
8021
8022 static int status_resync(struct seq_file *seq, struct mddev *mddev)
8023 {
8024         sector_t max_sectors, resync, res;
8025         unsigned long dt, db = 0;
8026         sector_t rt, curr_mark_cnt, resync_mark_cnt;
8027         int scale, recovery_active;
8028         unsigned int per_milli;
8029
8030         if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ||
8031             test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
8032                 max_sectors = mddev->resync_max_sectors;
8033         else
8034                 max_sectors = mddev->dev_sectors;
8035
8036         resync = mddev->curr_resync;
8037         if (resync < MD_RESYNC_ACTIVE) {
8038                 if (test_bit(MD_RECOVERY_DONE, &mddev->recovery))
8039                         /* Still cleaning up */
8040                         resync = max_sectors;
8041         } else if (resync > max_sectors) {
8042                 resync = max_sectors;
8043         } else {
8044                 res = atomic_read(&mddev->recovery_active);
8045                 /*
8046                  * Resync has started, but the subtraction has overflowed or
8047                  * yielded one of the special values. Force it to active to
8048                  * ensure the status reports an active resync.
8049                  */
8050                 if (resync < res || resync - res < MD_RESYNC_ACTIVE)
8051                         resync = MD_RESYNC_ACTIVE;
8052                 else
8053                         resync -= res;
8054         }
8055
8056         if (resync == MD_RESYNC_NONE) {
8057                 if (test_bit(MD_RESYNCING_REMOTE, &mddev->recovery)) {
8058                         struct md_rdev *rdev;
8059
8060                         rdev_for_each(rdev, mddev)
8061                                 if (rdev->raid_disk >= 0 &&
8062                                     !test_bit(Faulty, &rdev->flags) &&
8063                                     rdev->recovery_offset != MaxSector &&
8064                                     rdev->recovery_offset) {
8065                                         seq_printf(seq, "\trecover=REMOTE");
8066                                         return 1;
8067                                 }
8068                         if (mddev->reshape_position != MaxSector)
8069                                 seq_printf(seq, "\treshape=REMOTE");
8070                         else
8071                                 seq_printf(seq, "\tresync=REMOTE");
8072                         return 1;
8073                 }
8074                 if (mddev->recovery_cp < MaxSector) {
8075                         seq_printf(seq, "\tresync=PENDING");
8076                         return 1;
8077                 }
8078                 return 0;
8079         }
8080         if (resync < MD_RESYNC_ACTIVE) {
8081                 seq_printf(seq, "\tresync=DELAYED");
8082                 return 1;
8083         }
8084
8085         WARN_ON(max_sectors == 0);
8086         /* Pick 'scale' such that (resync>>scale)*1000 will fit
8087          * in a sector_t, and (max_sectors>>scale) will fit in a
8088          * u32, as those are the requirements for sector_div.
8089          * Thus 'scale' must be at least 10
8090          */
8091         scale = 10;
8092         if (sizeof(sector_t) > sizeof(unsigned long)) {
8093                 while ( max_sectors/2 > (1ULL<<(scale+32)))
8094                         scale++;
8095         }
8096         res = (resync>>scale)*1000;
8097         sector_div(res, (u32)((max_sectors>>scale)+1));
8098
8099         per_milli = res;
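        /*
         * Render a 20-cell ASCII progress bar: one '=' per completed 5%,
         * e.g. per_milli == 500 (50.0%) prints "[==========>..........]".
         */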
8100         {
8101                 int i, x = per_milli/50, y = 20-x;
8102                 seq_printf(seq, "[");
8103                 for (i = 0; i < x; i++)
8104                         seq_printf(seq, "=");
8105                 seq_printf(seq, ">");
8106                 for (i = 0; i < y; i++)
8107                         seq_printf(seq, ".");
8108                 seq_printf(seq, "] ");
8109         }
8110         seq_printf(seq, " %s =%3u.%u%% (%llu/%llu)",
8111                    (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)?
8112                     "reshape" :
8113                     (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)?
8114                      "check" :
8115                      (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ?
8116                       "resync" : "recovery"))),
8117                    per_milli/10, per_milli % 10,
8118                    (unsigned long long) resync/2,
8119                    (unsigned long long) max_sectors/2);
8120
8121         /*
8122          * dt: time from mark until now
8123          * db: blocks written from mark until now
8124          * rt: remaining time
8125          *
8126          * rt is a sector_t, which is always 64bit now. We are keeping
8127          * the original algorithm, but it is not really necessary.
8128          *
8129          * Original algorithm:
8130          *   So we divide before multiply in case it is 32bit and close
8131          *   to the limit.
8132          *   We scale the divisor (db) by 32 to avoid losing precision
8133          *   near the end of resync when the number of remaining sectors
8134          *   is close to 'db'.
8135          *   We then divide rt by 32 after multiplying by db to compensate.
8136          *   The '+1' avoids division by zero if db is very small.
8137          */
8138         dt = ((jiffies - mddev->resync_mark) / HZ);
8139         if (!dt) dt++;
8140
8141         curr_mark_cnt = mddev->curr_mark_cnt;
8142         recovery_active = atomic_read(&mddev->recovery_active);
8143         resync_mark_cnt = mddev->resync_mark_cnt;
8144
8145         if (curr_mark_cnt >= (recovery_active + resync_mark_cnt))
8146                 db = curr_mark_cnt - (recovery_active + resync_mark_cnt);
8147
8148         rt = max_sectors - resync;    /* number of remaining sectors */
8149         rt = div64_u64(rt, db/32+1);
8150         rt *= dt;
8151         rt >>= 5;
8152
8153         seq_printf(seq, " finish=%lu.%lumin", (unsigned long)rt / 60,
8154                    ((unsigned long)rt % 60)/6);
8155
8156         seq_printf(seq, " speed=%ldK/sec", db/2/dt);
8157         return 1;
8158 }
8159
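/*
 * /proc/mdstat iterator: position 0 is the header line and position 0x10000
 * the trailing "unused devices" line.  The sentinel pointers (void *)1 and
 * (void *)2 stand in for those two pseudo-entries, while real positions walk
 * the all_mddevs list under all_mddevs_lock.
 */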
8160 static void *md_seq_start(struct seq_file *seq, loff_t *pos)
8161 {
8162         struct list_head *tmp;
8163         loff_t l = *pos;
8164         struct mddev *mddev;
8165
8166         if (l == 0x10000) {
8167                 ++*pos;
8168                 return (void *)2;
8169         }
8170         if (l > 0x10000)
8171                 return NULL;
8172         if (!l--)
8173                 /* header */
8174                 return (void*)1;
8175
8176         spin_lock(&all_mddevs_lock);
8177         list_for_each(tmp,&all_mddevs)
8178                 if (!l--) {
8179                         mddev = list_entry(tmp, struct mddev, all_mddevs);
8180                         if (!mddev_get(mddev))
8181                                 continue;
8182                         spin_unlock(&all_mddevs_lock);
8183                         return mddev;
8184                 }
8185         spin_unlock(&all_mddevs_lock);
8186         if (!l--)
8187                 return (void*)2;/* tail */
8188         return NULL;
8189 }
8190
8191 static void *md_seq_next(struct seq_file *seq, void *v, loff_t *pos)
8192 {
8193         struct list_head *tmp;
8194         struct mddev *next_mddev, *mddev = v;
8195         struct mddev *to_put = NULL;
8196
8197         ++*pos;
8198         if (v == (void*)2)
8199                 return NULL;
8200
8201         spin_lock(&all_mddevs_lock);
8202         if (v == (void*)1) {
8203                 tmp = all_mddevs.next;
8204         } else {
8205                 to_put = mddev;
8206                 tmp = mddev->all_mddevs.next;
8207         }
8208
8209         for (;;) {
8210                 if (tmp == &all_mddevs) {
8211                         next_mddev = (void*)2;
8212                         *pos = 0x10000;
8213                         break;
8214                 }
8215                 next_mddev = list_entry(tmp, struct mddev, all_mddevs);
8216                 if (mddev_get(next_mddev))
8217                         break;
8218                 mddev = next_mddev;
8219                 tmp = mddev->all_mddevs.next;
8220         }
8221         spin_unlock(&all_mddevs_lock);
8222
8223         if (to_put)
8224                 mddev_put(mddev);
8225         return next_mddev;
8226
8227 }
8228
8229 static void md_seq_stop(struct seq_file *seq, void *v)
8230 {
8231         struct mddev *mddev = v;
8232
8233         if (mddev && v != (void*)1 && v != (void*)2)
8234                 mddev_put(mddev);
8235 }
8236
8237 static int md_seq_show(struct seq_file *seq, void *v)
8238 {
8239         struct mddev *mddev = v;
8240         sector_t sectors;
8241         struct md_rdev *rdev;
8242
8243         if (v == (void*)1) {
8244                 struct md_personality *pers;
8245                 seq_printf(seq, "Personalities : ");
8246                 spin_lock(&pers_lock);
8247                 list_for_each_entry(pers, &pers_list, list)
8248                         seq_printf(seq, "[%s] ", pers->name);
8249
8250                 spin_unlock(&pers_lock);
8251                 seq_printf(seq, "\n");
8252                 seq->poll_event = atomic_read(&md_event_count);
8253                 return 0;
8254         }
8255         if (v == (void*)2) {
8256                 status_unused(seq);
8257                 return 0;
8258         }
8259
8260         spin_lock(&mddev->lock);
8261         if (mddev->pers || mddev->raid_disks || !list_empty(&mddev->disks)) {
8262                 seq_printf(seq, "%s : %sactive", mdname(mddev),
8263                                                 mddev->pers ? "" : "in");
8264                 if (mddev->pers) {
8265                         if (mddev->ro == MD_RDONLY)
8266                                 seq_printf(seq, " (read-only)");
8267                         if (mddev->ro == MD_AUTO_READ)
8268                                 seq_printf(seq, " (auto-read-only)");
8269                         seq_printf(seq, " %s", mddev->pers->name);
8270                 }
8271
8272                 sectors = 0;
8273                 rcu_read_lock();
8274                 rdev_for_each_rcu(rdev, mddev) {
8275                         seq_printf(seq, " %pg[%d]", rdev->bdev, rdev->desc_nr);
8276
8277                         if (test_bit(WriteMostly, &rdev->flags))
8278                                 seq_printf(seq, "(W)");
8279                         if (test_bit(Journal, &rdev->flags))
8280                                 seq_printf(seq, "(J)");
8281                         if (test_bit(Faulty, &rdev->flags)) {
8282                                 seq_printf(seq, "(F)");
8283                                 continue;
8284                         }
8285                         if (rdev->raid_disk < 0)
8286                                 seq_printf(seq, "(S)"); /* spare */
8287                         if (test_bit(Replacement, &rdev->flags))
8288                                 seq_printf(seq, "(R)");
8289                         sectors += rdev->sectors;
8290                 }
8291                 rcu_read_unlock();
8292
8293                 if (!list_empty(&mddev->disks)) {
8294                         if (mddev->pers)
8295                                 seq_printf(seq, "\n      %llu blocks",
8296                                            (unsigned long long)
8297                                            mddev->array_sectors / 2);
8298                         else
8299                                 seq_printf(seq, "\n      %llu blocks",
8300                                            (unsigned long long)sectors / 2);
8301                 }
8302                 if (mddev->persistent) {
8303                         if (mddev->major_version != 0 ||
8304                             mddev->minor_version != 90) {
8305                                 seq_printf(seq," super %d.%d",
8306                                            mddev->major_version,
8307                                            mddev->minor_version);
8308                         }
8309                 } else if (mddev->external)
8310                         seq_printf(seq, " super external:%s",
8311                                    mddev->metadata_type);
8312                 else
8313                         seq_printf(seq, " super non-persistent");
8314
8315                 if (mddev->pers) {
8316                         mddev->pers->status(seq, mddev);
8317                         seq_printf(seq, "\n      ");
8318                         if (mddev->pers->sync_request) {
8319                                 if (status_resync(seq, mddev))
8320                                         seq_printf(seq, "\n      ");
8321                         }
8322                 } else
8323                         seq_printf(seq, "\n       ");
8324
8325                 md_bitmap_status(seq, mddev->bitmap);
8326
8327                 seq_printf(seq, "\n");
8328         }
8329         spin_unlock(&mddev->lock);
8330
8331         return 0;
8332 }
8333
8334 static const struct seq_operations md_seq_ops = {
8335         .start  = md_seq_start,
8336         .next   = md_seq_next,
8337         .stop   = md_seq_stop,
8338         .show   = md_seq_show,
8339 };
8340
8341 static int md_seq_open(struct inode *inode, struct file *file)
8342 {
8343         struct seq_file *seq;
8344         int error;
8345
8346         error = seq_open(file, &md_seq_ops);
8347         if (error)
8348                 return error;
8349
8350         seq = file->private_data;
8351         seq->poll_event = atomic_read(&md_event_count);
8352         return error;
8353 }
8354
8355 static int md_unloading;
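/*
 * /proc/mdstat is always readable; an "exceptional" event is reported when
 * md_event_count has advanced since this seq_file was opened, or
 * unconditionally while the module is unloading.
 */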
8356 static __poll_t mdstat_poll(struct file *filp, poll_table *wait)
8357 {
8358         struct seq_file *seq = filp->private_data;
8359         __poll_t mask;
8360
8361         if (md_unloading)
8362                 return EPOLLIN|EPOLLRDNORM|EPOLLERR|EPOLLPRI;
8363         poll_wait(filp, &md_event_waiters, wait);
8364
8365         /* always allow read */
8366         mask = EPOLLIN | EPOLLRDNORM;
8367
8368         if (seq->poll_event != atomic_read(&md_event_count))
8369                 mask |= EPOLLERR | EPOLLPRI;
8370         return mask;
8371 }
8372
8373 static const struct proc_ops mdstat_proc_ops = {
8374         .proc_open      = md_seq_open,
8375         .proc_read      = seq_read,
8376         .proc_lseek     = seq_lseek,
8377         .proc_release   = seq_release,
8378         .proc_poll      = mdstat_poll,
8379 };
8380
8381 int register_md_personality(struct md_personality *p)
8382 {
8383         pr_debug("md: %s personality registered for level %d\n",
8384                  p->name, p->level);
8385         spin_lock(&pers_lock);
8386         list_add_tail(&p->list, &pers_list);
8387         spin_unlock(&pers_lock);
8388         return 0;
8389 }
8390 EXPORT_SYMBOL(register_md_personality);
8391
8392 int unregister_md_personality(struct md_personality *p)
8393 {
8394         pr_debug("md: %s personality unregistered\n", p->name);
8395         spin_lock(&pers_lock);
8396         list_del_init(&p->list);
8397         spin_unlock(&pers_lock);
8398         return 0;
8399 }
8400 EXPORT_SYMBOL(unregister_md_personality);
8401
8402 int register_md_cluster_operations(struct md_cluster_operations *ops,
8403                                    struct module *module)
8404 {
8405         int ret = 0;
8406         spin_lock(&pers_lock);
8407         if (md_cluster_ops != NULL)
8408                 ret = -EALREADY;
8409         else {
8410                 md_cluster_ops = ops;
8411                 md_cluster_mod = module;
8412         }
8413         spin_unlock(&pers_lock);
8414         return ret;
8415 }
8416 EXPORT_SYMBOL(register_md_cluster_operations);
8417
8418 int unregister_md_cluster_operations(void)
8419 {
8420         spin_lock(&pers_lock);
8421         md_cluster_ops = NULL;
8422         spin_unlock(&pers_lock);
8423         return 0;
8424 }
8425 EXPORT_SYMBOL(unregister_md_cluster_operations);
8426
8427 int md_setup_cluster(struct mddev *mddev, int nodes)
8428 {
8429         int ret;
8430         if (!md_cluster_ops)
8431                 request_module("md-cluster");
8432         spin_lock(&pers_lock);
8433         /* ensure module won't be unloaded */
8434         if (!md_cluster_ops || !try_module_get(md_cluster_mod)) {
8435                 pr_warn("can't find md-cluster module or get its reference.\n");
8436                 spin_unlock(&pers_lock);
8437                 return -ENOENT;
8438         }
8439         spin_unlock(&pers_lock);
8440
8441         ret = md_cluster_ops->join(mddev, nodes);
8442         if (!ret)
8443                 mddev->safemode_delay = 0;
8444         return ret;
8445 }
8446
8447 void md_cluster_stop(struct mddev *mddev)
8448 {
8449         if (!md_cluster_ops)
8450                 return;
8451         md_cluster_ops->leave(mddev);
8452         module_put(md_cluster_mod);
8453 }
8454
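/*
 * Heuristic used to throttle resync: the array counts as idle unless some
 * member device has completed noticeably more I/O (beyond the resync I/O
 * tracked in ->sync_io) since the last check.
 */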
8455 static int is_mddev_idle(struct mddev *mddev, int init)
8456 {
8457         struct md_rdev *rdev;
8458         int idle;
8459         int curr_events;
8460
8461         idle = 1;
8462         rcu_read_lock();
8463         rdev_for_each_rcu(rdev, mddev) {
8464                 struct gendisk *disk = rdev->bdev->bd_disk;
8465                 curr_events = (int)part_stat_read_accum(disk->part0, sectors) -
8466                               atomic_read(&disk->sync_io);
8467                 /* sync IO will cause sync_io to increase before the disk_stats
8468                  * as sync_io is counted when a request starts, and
8469                  * disk_stats is counted when it completes.
8470                  * So resync activity will cause curr_events to be smaller than
8471                  * when there was no such activity.
8472                  * non-sync IO will cause disk_stat to increase without
8473                  * increasing sync_io so curr_events will (eventually)
8474                  * be larger than it was before.  Once it becomes
8475                  * substantially larger, the test below will cause
8476                  * the array to appear non-idle, and resync will slow
8477                  * down.
8478                  * If there is a lot of outstanding resync activity when
8479                  * we set last_event to curr_events, then all that activity
8480                  * completing might cause the array to appear non-idle
8481                  * and resync will be slowed down even though there might
8482                  * not have been non-resync activity.  This will only
8483                  * happen once though.  'last_events' will soon reflect
8484                  * the state where there is little or no outstanding
8485                  * resync requests, and further resync activity will
8486                  * always make curr_events less than last_events.
8487                  *
8488                  */
8489                 if (init || curr_events - rdev->last_events > 64) {
8490                         rdev->last_events = curr_events;
8491                         idle = 0;
8492                 }
8493         }
8494         rcu_read_unlock();
8495         return idle;
8496 }
8497
8498 void md_done_sync(struct mddev *mddev, int blocks, int ok)
8499 {
8500         /* another "blocks" (512byte) blocks have been synced */
8501         atomic_sub(blocks, &mddev->recovery_active);
8502         wake_up(&mddev->recovery_wait);
8503         if (!ok) {
8504                 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
8505                 set_bit(MD_RECOVERY_ERROR, &mddev->recovery);
8506                 md_wakeup_thread(mddev->thread);
8507                 /* stop recovery, signal do_sync ... */
8508         }
8509 }
8510 EXPORT_SYMBOL(md_done_sync);
8511
8512 /* md_write_start(mddev, bi)
8513  * If we need to update some array metadata (e.g. 'active' flag
8514  * in superblock) before writing, schedule a superblock update
8515  * and wait for it to complete.
8516  * A return value of 'false' means that the write wasn't recorded
8517  * and cannot proceed as the array is being suspended.
8518  */
8519 bool md_write_start(struct mddev *mddev, struct bio *bi)
8520 {
8521         int did_change = 0;
8522
8523         if (bio_data_dir(bi) != WRITE)
8524                 return true;
8525
8526         BUG_ON(mddev->ro == MD_RDONLY);
8527         if (mddev->ro == MD_AUTO_READ) {
8528                 /* need to switch to read/write */
8529                 mddev->ro = MD_RDWR;
8530                 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
8531                 md_wakeup_thread(mddev->thread);
8532                 md_wakeup_thread(mddev->sync_thread);
8533                 did_change = 1;
8534         }
8535         rcu_read_lock();
8536         percpu_ref_get(&mddev->writes_pending);
8537         smp_mb(); /* Match smp_mb in set_in_sync() */
8538         if (mddev->safemode == 1)
8539                 mddev->safemode = 0;
8540         /* sync_checkers is always 0 when writes_pending is in per-cpu mode */
8541         if (mddev->in_sync || mddev->sync_checkers) {
8542                 spin_lock(&mddev->lock);
8543                 if (mddev->in_sync) {
8544                         mddev->in_sync = 0;
8545                         set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
8546                         set_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
8547                         md_wakeup_thread(mddev->thread);
8548                         did_change = 1;
8549                 }
8550                 spin_unlock(&mddev->lock);
8551         }
8552         rcu_read_unlock();
8553         if (did_change)
8554                 sysfs_notify_dirent_safe(mddev->sysfs_state);
8555         if (!mddev->has_superblocks)
8556                 return true;
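             /*
              * Wait for any pending superblock update (such as the 'array is
              * dirty' one scheduled above) to be written, or for the array to
              * be suspended.  If MD_SB_CHANGE_PENDING is still set we were
              * suspended before the update landed, so drop the reference and
              * refuse the write.
              */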
8557         wait_event(mddev->sb_wait,
8558                    !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags) ||
8559                    is_md_suspended(mddev));
8560         if (test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) {
8561                 percpu_ref_put(&mddev->writes_pending);
8562                 return false;
8563         }
8564         return true;
8565 }
8566 EXPORT_SYMBOL(md_write_start);
8567
8568 /* md_write_inc can only be called when md_write_start() has
8569  * already been called at least once for the current request.
8570  * It increments the counter and is useful when a single request
8571  * is split into several parts.  Each part causes an increment and
8572  * so needs a matching md_write_end().
8573  * Unlike md_write_start(), it is safe to call md_write_inc() inside
8574  * a spinlocked region.
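      *
      * A typical (simplified) pattern in a write-handling personality is
      * roughly:
      *
      *     if (!md_write_start(mddev, bio))
      *             return;
      *     ... split the bio, calling md_write_inc() for each extra part ...
      *     ... then call md_write_end() once as each part completes ...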
8575  */
8576 void md_write_inc(struct mddev *mddev, struct bio *bi)
8577 {
8578         if (bio_data_dir(bi) != WRITE)
8579                 return;
8580         WARN_ON_ONCE(mddev->in_sync || !md_is_rdwr(mddev));
8581         percpu_ref_get(&mddev->writes_pending);
8582 }
8583 EXPORT_SYMBOL(md_write_inc);
8584
8585 void md_write_end(struct mddev *mddev)
8586 {
8587         percpu_ref_put(&mddev->writes_pending);
8588
8589         if (mddev->safemode == 2)
8590                 md_wakeup_thread(mddev->thread);
8591         else if (mddev->safemode_delay)
8592                 /* The roundup() ensures this only performs locking once
8593                  * every ->safemode_delay jiffies
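              * (e.g. with safemode_delay == 200, calls at jiffies 1201..1400
              * all compute the same expiry: roundup(jiffies, 200) + 200 == 1600)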
8594                  */
8595                 mod_timer(&mddev->safemode_timer,
8596                           roundup(jiffies, mddev->safemode_delay) +
8597                           mddev->safemode_delay);
8598 }
8599
8600 EXPORT_SYMBOL(md_write_end);
8601
8602 /* This is used by raid0 and raid10 */
8603 void md_submit_discard_bio(struct mddev *mddev, struct md_rdev *rdev,
8604                         struct bio *bio, sector_t start, sector_t size)
8605 {
8606         struct bio *discard_bio = NULL;
8607
8608         if (__blkdev_issue_discard(rdev->bdev, start, size, GFP_NOIO,
8609                         &discard_bio) || !discard_bio)
8610                 return;
8611
8612         bio_chain(discard_bio, bio);
8613         bio_clone_blkg_association(discard_bio, bio);
8614         if (mddev->gendisk)
8615                 trace_block_bio_remap(discard_bio,
8616                                 disk_devt(mddev->gendisk),
8617                                 bio->bi_iter.bi_sector);
8618         submit_bio_noacct(discard_bio);
8619 }
8620 EXPORT_SYMBOL_GPL(md_submit_discard_bio);
8621
8622 int acct_bioset_init(struct mddev *mddev)
8623 {
8624         int err = 0;
8625
8626         if (!bioset_initialized(&mddev->io_acct_set))
8627                 err = bioset_init(&mddev->io_acct_set, BIO_POOL_SIZE,
8628                         offsetof(struct md_io_acct, bio_clone), 0);
8629         return err;
8630 }
8631 EXPORT_SYMBOL_GPL(acct_bioset_init);
8632
8633 void acct_bioset_exit(struct mddev *mddev)
8634 {
8635         bioset_exit(&mddev->io_acct_set);
8636 }
8637 EXPORT_SYMBOL_GPL(acct_bioset_exit);
8638
8639 static void md_end_io_acct(struct bio *bio)
8640 {
8641         struct md_io_acct *md_io_acct = bio->bi_private;
8642         struct bio *orig_bio = md_io_acct->orig_bio;
8643         struct mddev *mddev = md_io_acct->mddev;
8644
8645         orig_bio->bi_status = bio->bi_status;
8646
8647         bio_end_io_acct(orig_bio, md_io_acct->start_time);
8648         bio_put(bio);
8649         bio_endio(orig_bio);
8650
8651         percpu_ref_put(&mddev->active_io);
8652 }
8653
8654 /*
8655  * Used by personalities that don't already clone the bio and thus can't
8656  * easily add the timestamp to their extended bio structure.
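      * Callers typically do "md_account_bio(mddev, &bio);" and then submit the
      * (possibly replaced) bio as usual; the clone's end_io completes the
      * original bio and finishes the accounting.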
8657  */
8658 void md_account_bio(struct mddev *mddev, struct bio **bio)
8659 {
8660         struct block_device *bdev = (*bio)->bi_bdev;
8661         struct md_io_acct *md_io_acct;
8662         struct bio *clone;
8663
8664         if (!blk_queue_io_stat(bdev->bd_disk->queue))
8665                 return;
8666
8667         percpu_ref_get(&mddev->active_io);
8668
8669         clone = bio_alloc_clone(bdev, *bio, GFP_NOIO, &mddev->io_acct_set);
8670         md_io_acct = container_of(clone, struct md_io_acct, bio_clone);
8671         md_io_acct->orig_bio = *bio;
8672         md_io_acct->start_time = bio_start_io_acct(*bio);
8673         md_io_acct->mddev = mddev;
8674
8675         clone->bi_end_io = md_end_io_acct;
8676         clone->bi_private = md_io_acct;
8677         *bio = clone;
8678 }
8679 EXPORT_SYMBOL_GPL(md_account_bio);
8680
8681 /* md_allow_write(mddev)
8682  * Calling this ensures that the array is marked 'active' so that writes
8683  * may proceed without blocking.  It is important to call this before
8684  * attempting a GFP_KERNEL allocation while holding the mddev lock.
8685  * Must be called with mddev_lock held.
8686  */
8687 void md_allow_write(struct mddev *mddev)
8688 {
8689         if (!mddev->pers)
8690                 return;
8691         if (!md_is_rdwr(mddev))
8692                 return;
8693         if (!mddev->pers->sync_request)
8694                 return;
8695
8696         spin_lock(&mddev->lock);
8697         if (mddev->in_sync) {
8698                 mddev->in_sync = 0;
8699                 set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
8700                 set_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
8701                 if (mddev->safemode_delay &&
8702                     mddev->safemode == 0)
8703                         mddev->safemode = 1;
8704                 spin_unlock(&mddev->lock);
8705                 md_update_sb(mddev, 0);
8706                 sysfs_notify_dirent_safe(mddev->sysfs_state);
8707                 /* wait for the dirty state to be recorded in the metadata */
8708                 wait_event(mddev->sb_wait,
8709                            !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags));
8710         } else
8711                 spin_unlock(&mddev->lock);
8712 }
8713 EXPORT_SYMBOL_GPL(md_allow_write);
8714
8715 #define SYNC_MARKS      10
8716 #define SYNC_MARK_STEP  (3*HZ)
8717 #define UPDATE_FREQUENCY (5*60*HZ)
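     /*
      * Resync speed is averaged over a sliding window of SYNC_MARKS samples
      * taken every SYNC_MARK_STEP jiffies (roughly the last 30 seconds), and
      * curr_resync_completed is checkpointed at least every UPDATE_FREQUENCY
      * jiffies during the sync.
      */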
8718 void md_do_sync(struct md_thread *thread)
8719 {
8720         struct mddev *mddev = thread->mddev;
8721         struct mddev *mddev2;
8722         unsigned int currspeed = 0, window;
8723         sector_t max_sectors, j, io_sectors, recovery_done;
8724         unsigned long mark[SYNC_MARKS];
8725         unsigned long update_time;
8726         sector_t mark_cnt[SYNC_MARKS];
8727         int last_mark, m;
8728         sector_t last_check;
8729         int skipped = 0;
8730         struct md_rdev *rdev;
8731         char *desc, *action = NULL;
8732         struct blk_plug plug;
8733         int ret;
8734
8735         /* just in case the thread restarts... */
8736         if (test_bit(MD_RECOVERY_DONE, &mddev->recovery) ||
8737             test_bit(MD_RECOVERY_WAIT, &mddev->recovery))
8738                 return;
8739         if (!md_is_rdwr(mddev)) {/* never try to sync a read-only array */
8740                 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
8741                 return;
8742         }
8743
8744         if (mddev_is_clustered(mddev)) {
8745                 ret = md_cluster_ops->resync_start(mddev);
8746                 if (ret)
8747                         goto skip;
8748
8749                 set_bit(MD_CLUSTER_RESYNC_LOCKED, &mddev->flags);
8750                 if (!(test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ||
8751                         test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) ||
8752                         test_bit(MD_RECOVERY_RECOVER, &mddev->recovery))
8753                      && ((unsigned long long)mddev->curr_resync_completed
8754                          < (unsigned long long)mddev->resync_max_sectors))
8755                         goto skip;
8756         }
8757
8758         if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
8759                 if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)) {
8760                         desc = "data-check";
8761                         action = "check";
8762                 } else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
8763                         desc = "requested-resync";
8764                         action = "repair";
8765                 } else
8766                         desc = "resync";
8767         } else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
8768                 desc = "reshape";
8769         else
8770                 desc = "recovery";
8771
8772         mddev->last_sync_action = action ?: desc;
8773
8774         /*
8775          * Before starting a resync we must have set curr_resync to
8776          * MD_RESYNC_DELAYED (2), and then checked that every "conflicting" array
8777          * has curr_resync less than ours.  When we find one that is the same or
8778          * higher we wait on resync_wait.  To avoid deadlock, we reduce curr_resync
8779          * to MD_RESYNC_YIELDED (1) if we choose to yield (based arbitrarily on the address of the mddev structure).
8780          * This will mean we have to start checking from the beginning again.
8781          *
8782          */
8783
8784         do {
8785                 int mddev2_minor = -1;
8786                 mddev->curr_resync = MD_RESYNC_DELAYED;
8787
8788         try_again:
8789                 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
8790                         goto skip;
8791                 spin_lock(&all_mddevs_lock);
8792                 list_for_each_entry(mddev2, &all_mddevs, all_mddevs) {
8793                         if (test_bit(MD_DELETED, &mddev2->flags))
8794                                 continue;
8795                         if (mddev2 == mddev)
8796                                 continue;
8797                         if (!mddev->parallel_resync
8798                         &&  mddev2->curr_resync
8799                         &&  match_mddev_units(mddev, mddev2)) {
8800                                 DEFINE_WAIT(wq);
8801                                 if (mddev < mddev2 &&
8802                                     mddev->curr_resync == MD_RESYNC_DELAYED) {
8803                                         /* arbitrarily yield */
8804                                         mddev->curr_resync = MD_RESYNC_YIELDED;
8805                                         wake_up(&resync_wait);
8806                                 }
8807                                 if (mddev > mddev2 &&
8808                                     mddev->curr_resync == MD_RESYNC_YIELDED)
8809                                         /* no need to wait here, we can wait the next
8810                                          * time 'round when curr_resync == MD_RESYNC_DELAYED
8811                                          */
8812                                         continue;
8813                                 /* We need to wait 'interruptible' so as not to
8814                                  * contribute to the load average, and not to
8815                                  * be caught by 'softlockup'
8816                                  */
8817                                 prepare_to_wait(&resync_wait, &wq, TASK_INTERRUPTIBLE);
8818                                 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
8819                                     mddev2->curr_resync >= mddev->curr_resync) {
8820                                         if (mddev2_minor != mddev2->md_minor) {
8821                                                 mddev2_minor = mddev2->md_minor;
8822                                                 pr_info("md: delaying %s of %s until %s has finished (they share one or more physical units)\n",
8823                                                         desc, mdname(mddev),
8824                                                         mdname(mddev2));
8825                                         }
8826                                         spin_unlock(&all_mddevs_lock);
8827
8828                                         if (signal_pending(current))
8829                                                 flush_signals(current);
8830                                         schedule();
8831                                         finish_wait(&resync_wait, &wq);
8832                                         goto try_again;
8833                                 }
8834                                 finish_wait(&resync_wait, &wq);
8835                         }
8836                 }
8837                 spin_unlock(&all_mddevs_lock);
8838         } while (mddev->curr_resync < MD_RESYNC_DELAYED);
8839
8840         j = 0;
8841         if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
8842                 /* resync follows the size requested by the personality,
8843                  * which defaults to physical size, but can be virtual size
8844                  */
8845                 max_sectors = mddev->resync_max_sectors;
8846                 atomic64_set(&mddev->resync_mismatches, 0);
8847                 /* we don't use the checkpoint if there's a bitmap */
8848                 if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
8849                         j = mddev->resync_min;
8850                 else if (!mddev->bitmap)
8851                         j = mddev->recovery_cp;
8852
8853         } else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) {
8854                 max_sectors = mddev->resync_max_sectors;
8855                 /*
8856                  * If the original node aborts reshaping then we continue the
8857                  * reshaping, so set j again to avoid restarting the reshape from
8858                  * the very beginning
8859                  */
8860                 if (mddev_is_clustered(mddev) &&
8861                     mddev->reshape_position != MaxSector)
8862                         j = mddev->reshape_position;
8863         } else {
8864                 /* recovery follows the physical size of devices */
8865                 max_sectors = mddev->dev_sectors;
8866                 j = MaxSector;
8867                 rcu_read_lock();
8868                 rdev_for_each_rcu(rdev, mddev)
8869                         if (rdev->raid_disk >= 0 &&
8870                             !test_bit(Journal, &rdev->flags) &&
8871                             !test_bit(Faulty, &rdev->flags) &&
8872                             !test_bit(In_sync, &rdev->flags) &&
8873                             rdev->recovery_offset < j)
8874                                 j = rdev->recovery_offset;
8875                 rcu_read_unlock();
8876
8877                 /* If there is a bitmap, we need to make sure all
8878                  * writes that started before we added a spare
8879                  * complete before we start doing a recovery.
8880                  * Otherwise the write might complete and (via
8881                  * bitmap_endwrite) set a bit in the bitmap after the
8882                  * recovery has checked that bit and skipped that
8883                  * region.
8884                  */
8885                 if (mddev->bitmap) {
8886                         mddev->pers->quiesce(mddev, 1);
8887                         mddev->pers->quiesce(mddev, 0);
8888                 }
8889         }
8890
8891         pr_info("md: %s of RAID array %s\n", desc, mdname(mddev));
8892         pr_debug("md: minimum _guaranteed_  speed: %d KB/sec/disk.\n", speed_min(mddev));
8893         pr_debug("md: using maximum available idle IO bandwidth (but not more than %d KB/sec) for %s.\n",
8894                  speed_max(mddev), desc);
8895
8896         is_mddev_idle(mddev, 1); /* this initializes IO event counters */
8897
8898         io_sectors = 0;
8899         for (m = 0; m < SYNC_MARKS; m++) {
8900                 mark[m] = jiffies;
8901                 mark_cnt[m] = io_sectors;
8902         }
8903         last_mark = 0;
8904         mddev->resync_mark = mark[last_mark];
8905         mddev->resync_mark_cnt = mark_cnt[last_mark];
8906
8907         /*
8908          * Tune reconstruction:
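              * (with 4KiB pages the window below is 256 sectors, i.e. 128KiB)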
8909          */
8910         window = 32 * (PAGE_SIZE / 512);
8911         pr_debug("md: using %dk window, over a total of %lluk.\n",
8912                  window/2, (unsigned long long)max_sectors/2);
8913
8914         atomic_set(&mddev->recovery_active, 0);
8915         last_check = 0;
8916
8917         if (j >= MD_RESYNC_ACTIVE) {
8918                 pr_debug("md: resuming %s of %s from checkpoint.\n",
8919                          desc, mdname(mddev));
8920                 mddev->curr_resync = j;
8921         } else
8922                 mddev->curr_resync = MD_RESYNC_ACTIVE; /* no longer delayed */
8923         mddev->curr_resync_completed = j;
8924         sysfs_notify_dirent_safe(mddev->sysfs_completed);
8925         md_new_event();
8926         update_time = jiffies;
8927
8928         blk_start_plug(&plug);
8929         while (j < max_sectors) {
8930                 sector_t sectors;
8931
8932                 skipped = 0;
8933
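                     /*
                      * Checkpoint curr_resync_completed roughly when more than
                      * 1/16th of the device has been covered since the last
                      * checkpoint, when UPDATE_FREQUENCY jiffies have elapsed,
                      * or when we are closing in on (or past) resync_max.
                      */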
8934                 if (!test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
8935                     ((mddev->curr_resync > mddev->curr_resync_completed &&
8936                       (mddev->curr_resync - mddev->curr_resync_completed)
8937                       > (max_sectors >> 4)) ||
8938                      time_after_eq(jiffies, update_time + UPDATE_FREQUENCY) ||
8939                      (j - mddev->curr_resync_completed)*2
8940                      >= mddev->resync_max - mddev->curr_resync_completed ||
8941                      mddev->curr_resync_completed > mddev->resync_max
8942                             )) {
8943                         /* time to update curr_resync_completed */
8944                         wait_event(mddev->recovery_wait,
8945                                    atomic_read(&mddev->recovery_active) == 0);
8946                         mddev->curr_resync_completed = j;
8947                         if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) &&
8948                             j > mddev->recovery_cp)
8949                                 mddev->recovery_cp = j;
8950                         update_time = jiffies;
8951                         set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
8952                         sysfs_notify_dirent_safe(mddev->sysfs_completed);
8953                 }
8954
8955                 while (j >= mddev->resync_max &&
8956                        !test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
8957                         /* As this condition is controlled by user-space,
8958                          * we can block indefinitely, so use '_interruptible'
8959                          * to avoid triggering warnings.
8960                          */
8961                         flush_signals(current); /* just in case */
8962                         wait_event_interruptible(mddev->recovery_wait,
8963                                                  mddev->resync_max > j
8964                                                  || test_bit(MD_RECOVERY_INTR,
8965                                                              &mddev->recovery));
8966                 }
8967
8968                 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
8969                         break;
8970
8971                 sectors = mddev->pers->sync_request(mddev, j, &skipped);
8972                 if (sectors == 0) {
8973                         set_bit(MD_RECOVERY_INTR, &mddev->recovery);
8974                         break;
8975                 }
8976
8977                 if (!skipped) { /* actual IO requested */
8978                         io_sectors += sectors;
8979                         atomic_add(sectors, &mddev->recovery_active);
8980                 }
8981
8982                 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
8983                         break;
8984
8985                 j += sectors;
8986                 if (j > max_sectors)
8987                         /* when skipping, extra large numbers can be returned. */
8988                         j = max_sectors;
8989                 if (j >= MD_RESYNC_ACTIVE)
8990                         mddev->curr_resync = j;
8991                 mddev->curr_mark_cnt = io_sectors;
8992                 if (last_check == 0)
8993                         /* this is the earliest that rebuild will be
8994                          * visible in /proc/mdstat
8995                          */
8996                         md_new_event();
8997
8998                 if (last_check + window > io_sectors || j == max_sectors)
8999                         continue;
9000
9001                 last_check = io_sectors;
9002         repeat:
9003                 if (time_after_eq(jiffies, mark[last_mark] + SYNC_MARK_STEP )) {
9004                         /* step marks */
9005                         int next = (last_mark+1) % SYNC_MARKS;
9006
9007                         mddev->resync_mark = mark[next];
9008                         mddev->resync_mark_cnt = mark_cnt[next];
9009                         mark[next] = jiffies;
9010                         mark_cnt[next] = io_sectors - atomic_read(&mddev->recovery_active);
9011                         last_mark = next;
9012                 }
9013
9014                 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
9015                         break;
9016
9017                 /*
9018                  * this loop exits only when either we are slower than
9019                  * the 'hard' speed limit, or the system was IO-idle for
9020                  * a jiffy.
9021                  * the system might be non-idle CPU-wise, but we only care
9022                  * about not overloading the IO subsystem. (things like an
9023                  * e2fsck being done on the RAID array should execute fast)
9024                  */
9025                 cond_resched();
9026
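                     /*
                      * currspeed is the average throughput since resync_mark in
                      * KB/sec: completed 512-byte sectors divided by 2 gives
                      * KiB, divided by the elapsed seconds (the +1 terms guard
                      * against division by zero and a zero result).
                      */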
9027                 recovery_done = io_sectors - atomic_read(&mddev->recovery_active);
9028                 currspeed = ((unsigned long)(recovery_done - mddev->resync_mark_cnt))/2
9029                         /((jiffies-mddev->resync_mark)/HZ +1) +1;
9030
9031                 if (currspeed > speed_min(mddev)) {
9032                         if (currspeed > speed_max(mddev)) {
9033                                 msleep(500);
9034                                 goto repeat;
9035                         }
9036                         if (!is_mddev_idle(mddev, 0)) {
9037                                 /*
9038                                  * Give other IO more of a chance.
9039                                  * The faster the devices, the less we wait.
9040                                  */
9041                                 wait_event(mddev->recovery_wait,
9042                                            !atomic_read(&mddev->recovery_active));
9043                         }
9044                 }
9045         }
9046         pr_info("md: %s: %s %s.\n", mdname(mddev), desc,
9047                 test_bit(MD_RECOVERY_INTR, &mddev->recovery)
9048                 ? "interrupted" : "done");
9049         /*
9050          * this also signals 'finished resyncing' to md_stop
9051          */
9052         blk_finish_plug(&plug);
9053         wait_event(mddev->recovery_wait, !atomic_read(&mddev->recovery_active));
9054
9055         if (!test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
9056             !test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
9057             mddev->curr_resync >= MD_RESYNC_ACTIVE) {
9058                 mddev->curr_resync_completed = mddev->curr_resync;
9059                 sysfs_notify_dirent_safe(mddev->sysfs_completed);
9060         }
9061         mddev->pers->sync_request(mddev, max_sectors, &skipped);
9062
9063         if (!test_bit(MD_RECOVERY_CHECK, &mddev->recovery) &&
9064             mddev->curr_resync > MD_RESYNC_ACTIVE) {
9065                 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
9066                         if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
9067                                 if (mddev->curr_resync >= mddev->recovery_cp) {
9068                                         pr_debug("md: checkpointing %s of %s.\n",
9069                                                  desc, mdname(mddev));
9070                                         if (test_bit(MD_RECOVERY_ERROR,
9071                                                 &mddev->recovery))
9072                                                 mddev->recovery_cp =
9073                                                         mddev->curr_resync_completed;
9074                                         else
9075                                                 mddev->recovery_cp =
9076                                                         mddev->curr_resync;
9077                                 }
9078                         } else
9079                                 mddev->recovery_cp = MaxSector;
9080                 } else {
9081                         if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery))
9082                                 mddev->curr_resync = MaxSector;
9083                         if (!test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
9084                             test_bit(MD_RECOVERY_RECOVER, &mddev->recovery)) {
9085                                 rcu_read_lock();
9086                                 rdev_for_each_rcu(rdev, mddev)
9087                                         if (rdev->raid_disk >= 0 &&
9088                                             mddev->delta_disks >= 0 &&
9089                                             !test_bit(Journal, &rdev->flags) &&
9090                                             !test_bit(Faulty, &rdev->flags) &&
9091                                             !test_bit(In_sync, &rdev->flags) &&
9092                                             rdev->recovery_offset < mddev->curr_resync)
9093                                                 rdev->recovery_offset = mddev->curr_resync;
9094                                 rcu_read_unlock();
9095                         }
9096                 }
9097         }
9098  skip:
9099         /* set CHANGE_PENDING here since maybe another update is needed,
9100          * so other nodes are informed. It should be harmless for normal
9101          * raid */
9102         set_mask_bits(&mddev->sb_flags, 0,
9103                       BIT(MD_SB_CHANGE_PENDING) | BIT(MD_SB_CHANGE_DEVS));
9104
9105         if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
9106                         !test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
9107                         mddev->delta_disks > 0 &&
9108                         mddev->pers->finish_reshape &&
9109                         mddev->pers->size &&
9110                         mddev->queue) {
9111                 mddev_lock_nointr(mddev);
9112                 md_set_array_sectors(mddev, mddev->pers->size(mddev, 0, 0));
9113                 mddev_unlock(mddev);
9114                 if (!mddev_is_clustered(mddev))
9115                         set_capacity_and_notify(mddev->gendisk,
9116                                                 mddev->array_sectors);
9117         }
9118
9119         spin_lock(&mddev->lock);
9120         if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
9121                 /* We completed so min/max setting can be forgotten if used. */
9122                 if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
9123                         mddev->resync_min = 0;
9124                 mddev->resync_max = MaxSector;
9125         } else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
9126                 mddev->resync_min = mddev->curr_resync_completed;
9127         set_bit(MD_RECOVERY_DONE, &mddev->recovery);
9128         mddev->curr_resync = MD_RESYNC_NONE;
9129         spin_unlock(&mddev->lock);
9130
9131         wake_up(&resync_wait);
9132         wake_up(&mddev->sb_wait);
9133         md_wakeup_thread(mddev->thread);
9134         return;
9135 }
9136 EXPORT_SYMBOL_GPL(md_do_sync);
9137
9138 static int remove_and_add_spares(struct mddev *mddev,
9139                                  struct md_rdev *this)
9140 {
9141         struct md_rdev *rdev;
9142         int spares = 0;
9143         int removed = 0;
9144         bool remove_some = false;
9145
9146         if (this && test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
9147                 /* Mustn't remove devices when resync thread is running */
9148                 return 0;
9149
9150         rdev_for_each(rdev, mddev) {
9151                 if ((this == NULL || rdev == this) &&
9152                     rdev->raid_disk >= 0 &&
9153                     !test_bit(Blocked, &rdev->flags) &&
9154                     test_bit(Faulty, &rdev->flags) &&
9155                     atomic_read(&rdev->nr_pending)==0) {
9156                         /* Faulty non-Blocked devices with nr_pending == 0
9157                          * never get nr_pending incremented,
9158                          * never get Faulty cleared, and never get Blocked set.
9159                          * So we can synchronize_rcu now rather than once per device
9160                          */
9161                         remove_some = true;
9162                         set_bit(RemoveSynchronized, &rdev->flags);
9163                 }
9164         }
9165
9166         if (remove_some)
9167                 synchronize_rcu();
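             /*
              * Second pass: hot-remove anything that is now safe to remove,
              * after the RCU grace period (if one was needed) has elapsed.
              */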
9168         rdev_for_each(rdev, mddev) {
9169                 if ((this == NULL || rdev == this) &&
9170                     rdev->raid_disk >= 0 &&
9171                     !test_bit(Blocked, &rdev->flags) &&
9172                     ((test_bit(RemoveSynchronized, &rdev->flags) ||
9173                      (!test_bit(In_sync, &rdev->flags) &&
9174                       !test_bit(Journal, &rdev->flags))) &&
9175                     atomic_read(&rdev->nr_pending)==0)) {
9176                         if (mddev->pers->hot_remove_disk(
9177                                     mddev, rdev) == 0) {
9178                                 sysfs_unlink_rdev(mddev, rdev);
9179                                 rdev->saved_raid_disk = rdev->raid_disk;
9180                                 rdev->raid_disk = -1;
9181                                 removed++;
9182                         }
9183                 }
9184                 if (remove_some && test_bit(RemoveSynchronized, &rdev->flags))
9185                         clear_bit(RemoveSynchronized, &rdev->flags);
9186         }
9187
9188         if (removed && mddev->kobj.sd)
9189                 sysfs_notify_dirent_safe(mddev->sysfs_degraded);
9190
9191         if (this && removed)
9192                 goto no_add;
9193
9194         rdev_for_each(rdev, mddev) {
9195                 if (this && this != rdev)
9196                         continue;
9197                 if (test_bit(Candidate, &rdev->flags))
9198                         continue;
9199                 if (rdev->raid_disk >= 0 &&
9200                     !test_bit(In_sync, &rdev->flags) &&
9201                     !test_bit(Journal, &rdev->flags) &&
9202                     !test_bit(Faulty, &rdev->flags))
9203                         spares++;
9204                 if (rdev->raid_disk >= 0)
9205                         continue;
9206                 if (test_bit(Faulty, &rdev->flags))
9207                         continue;
9208                 if (!test_bit(Journal, &rdev->flags)) {
9209                         if (!md_is_rdwr(mddev) &&
9210                             !(rdev->saved_raid_disk >= 0 &&
9211                               !test_bit(Bitmap_sync, &rdev->flags)))
9212                                 continue;
9213
9214                         rdev->recovery_offset = 0;
9215                 }
9216                 if (mddev->pers->hot_add_disk(mddev, rdev) == 0) {
9217                         /* failure here is OK */
9218                         sysfs_link_rdev(mddev, rdev);
9219                         if (!test_bit(Journal, &rdev->flags))
9220                                 spares++;
9221                         md_new_event();
9222                         set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
9223                 }
9224         }
9225 no_add:
9226         if (removed)
9227                 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
9228         return spares;
9229 }
9230
9231 static void md_start_sync(struct work_struct *ws)
9232 {
9233         struct mddev *mddev = container_of(ws, struct mddev, del_work);
9234
9235         rcu_assign_pointer(mddev->sync_thread,
9236                            md_register_thread(md_do_sync, mddev, "resync"));
9237         if (!mddev->sync_thread) {
9238                 pr_warn("%s: could not start resync thread...\n",
9239                         mdname(mddev));
9240                 /* leave the spares where they are, it shouldn't hurt */
9241                 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
9242                 clear_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
9243                 clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
9244                 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
9245                 clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
9246                 wake_up(&resync_wait);
9247                 if (test_and_clear_bit(MD_RECOVERY_RECOVER,
9248                                        &mddev->recovery))
9249                         if (mddev->sysfs_action)
9250                                 sysfs_notify_dirent_safe(mddev->sysfs_action);
9251         } else
9252                 md_wakeup_thread(mddev->sync_thread);
9253         sysfs_notify_dirent_safe(mddev->sysfs_action);
9254         md_new_event();
9255 }
9256
9257 /*
9258  * This routine is regularly called by all per-raid-array threads to
9259  * deal with generic issues like resync and super-block update.
9260  * Raid personalities that don't have a thread (linear/raid0) do not
9261  * need this as they never do any recovery or update the superblock.
9262  *
9263  * It does not do any resync itself, but rather "forks" off other threads
9264  * to do that as needed.
9265  * When it is determined that resync is needed, we set MD_RECOVERY_RUNNING in
9266  * "->recovery" and create a thread at ->sync_thread.
9267  * When the thread finishes it sets MD_RECOVERY_DONE
9268  * and wakes up this thread, which will reap the sync thread and finish up.
9269  * This thread also removes any faulty devices (with nr_pending == 0).
9270  *
9271  * The overall approach is:
9272  *  1/ if the superblock needs updating, update it.
9273  *  2/ If a recovery thread is running, don't do anything else.
9274  *  3/ If recovery has finished, clean up, possibly marking spares active.
9275  *  4/ If there are any faulty devices, remove them.
9276  *  5/ If the array is degraded, try to add spare devices
9277  *  6/ If array has spares or is not in-sync, start a resync thread.
9278  */
9279 void md_check_recovery(struct mddev *mddev)
9280 {
9281         if (test_bit(MD_ALLOW_SB_UPDATE, &mddev->flags) && mddev->sb_flags) {
9282                 /* Write superblock - thread that called mddev_suspend()
9283                  * holds reconfig_mutex for us.
9284                  */
9285                 set_bit(MD_UPDATING_SB, &mddev->flags);
9286                 smp_mb__after_atomic();
9287                 if (test_bit(MD_ALLOW_SB_UPDATE, &mddev->flags))
9288                         md_update_sb(mddev, 0);
9289                 clear_bit_unlock(MD_UPDATING_SB, &mddev->flags);
9290                 wake_up(&mddev->sb_wait);
9291         }
9292
9293         if (is_md_suspended(mddev))
9294                 return;
9295
9296         if (mddev->bitmap)
9297                 md_bitmap_daemon_work(mddev);
9298
9299         if (signal_pending(current)) {
9300                 if (mddev->pers->sync_request && !mddev->external) {
9301                         pr_debug("md: %s in immediate safe mode\n",
9302                                  mdname(mddev));
9303                         mddev->safemode = 2;
9304                 }
9305                 flush_signals(current);
9306         }
9307
9308         if (!md_is_rdwr(mddev) &&
9309             !test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
9310                 return;
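             /*
              * Fast path: nothing to do unless the superblock needs writing
              * (beyond a bare CHANGE_PENDING), recovery is needed or has just
              * finished, or (roughly) safemode wants the array marked clean.
              */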
9311         if ( ! (
9312                 (mddev->sb_flags & ~ (1<<MD_SB_CHANGE_PENDING)) ||
9313                 test_bit(MD_RECOVERY_NEEDED, &mddev->recovery) ||
9314                 test_bit(MD_RECOVERY_DONE, &mddev->recovery) ||
9315                 (mddev->external == 0 && mddev->safemode == 1) ||
9316                 (mddev->safemode == 2
9317                  && !mddev->in_sync && mddev->recovery_cp == MaxSector)
9318                 ))
9319                 return;
9320
9321         if (mddev_trylock(mddev)) {
9322                 int spares = 0;
9323                 bool try_set_sync = mddev->safemode != 0;
9324
9325                 if (!mddev->external && mddev->safemode == 1)
9326                         mddev->safemode = 0;
9327
9328                 if (!md_is_rdwr(mddev)) {
9329                         struct md_rdev *rdev;
9330                         if (!mddev->external && mddev->in_sync)
9331                                 /* 'Blocked' flag not needed as failed devices
9332                                  * will be recorded if array switched to read/write.
9333                                  * Leaving it set will prevent the device
9334                                  * from being removed.
9335                                  */
9336                                 rdev_for_each(rdev, mddev)
9337                                         clear_bit(Blocked, &rdev->flags);
9338                         /* On a read-only array we can:
9339                          * - remove failed devices
9340                          * - add already-in_sync devices if the array itself
9341                          *   is in-sync.
9342                          * As we only add devices that are already in-sync,
9343                          * we can activate the spares immediately.
9344                          */
9345                         remove_and_add_spares(mddev, NULL);
9346                         /* There is no thread, but we need to call
9347                          * ->spare_active and clear saved_raid_disk
9348                          */
9349                         set_bit(MD_RECOVERY_INTR, &mddev->recovery);
9350                         md_reap_sync_thread(mddev);
9351                         clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
9352                         clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
9353                         clear_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
9354                         goto unlock;
9355                 }
9356
9357                 if (mddev_is_clustered(mddev)) {
9358                         struct md_rdev *rdev, *tmp;
9359                         /* kick the device if another node issued a
9360                          * remove disk.
9361                          */
9362                         rdev_for_each_safe(rdev, tmp, mddev) {
9363                                 if (test_and_clear_bit(ClusterRemove, &rdev->flags) &&
9364                                                 rdev->raid_disk < 0)
9365                                         md_kick_rdev_from_array(rdev);
9366                         }
9367                 }
9368
9369                 if (try_set_sync && !mddev->external && !mddev->in_sync) {
9370                         spin_lock(&mddev->lock);
9371                         set_in_sync(mddev);
9372                         spin_unlock(&mddev->lock);
9373                 }
9374
9375                 if (mddev->sb_flags)
9376                         md_update_sb(mddev, 0);
9377
9378                 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) &&
9379                     !test_bit(MD_RECOVERY_DONE, &mddev->recovery)) {
9380                         /* resync/recovery still happening */
9381                         clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
9382                         goto unlock;
9383                 }
9384                 if (mddev->sync_thread) {
9385                         md_reap_sync_thread(mddev);
9386                         goto unlock;
9387                 }
9388                 /* Set RUNNING before clearing NEEDED to avoid
9389                  * any transients in the value of "sync_action".
9390                  */
9391                 mddev->curr_resync_completed = 0;
9392                 spin_lock(&mddev->lock);
9393                 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
9394                 spin_unlock(&mddev->lock);
9395                 /* Clear some bits that don't mean anything, but
9396                  * might be left set
9397                  */
9398                 clear_bit(MD_RECOVERY_INTR, &mddev->recovery);
9399                 clear_bit(MD_RECOVERY_DONE, &mddev->recovery);
9400
9401                 if (!test_and_clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery) ||
9402                     test_bit(MD_RECOVERY_FROZEN, &mddev->recovery))
9403                         goto not_running;
9404                 /* no recovery is running.
9405                  * remove any failed drives, then
9406                  * add spares if possible.
9407                  * Spares are also removed and re-added, to allow
9408                  * the personality to fail the re-add.
9409                  */
9410
9411                 if (mddev->reshape_position != MaxSector) {
9412                         if (mddev->pers->check_reshape == NULL ||
9413                             mddev->pers->check_reshape(mddev) != 0)
9414                                 /* Cannot proceed */
9415                                 goto not_running;
9416                         set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
9417                         clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
9418                 } else if ((spares = remove_and_add_spares(mddev, NULL))) {
9419                         clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
9420                         clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
9421                         clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
9422                         set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
9423                 } else if (mddev->recovery_cp < MaxSector) {
9424                         set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
9425                         clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
9426                 } else if (!test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
9427                         /* nothing to be done ... */
9428                         goto not_running;
9429
9430                 if (mddev->pers->sync_request) {
9431                         if (spares) {
9432                                 /* We are adding a device or devices to an array
9433                                  * which has the bitmap stored on all devices.
9434                                  * So make sure all bitmap pages get written
9435                                  */
9436                                 md_bitmap_write_all(mddev->bitmap);
9437                         }
9438                         INIT_WORK(&mddev->del_work, md_start_sync);
9439                         queue_work(md_misc_wq, &mddev->del_work);
9440                         goto unlock;
9441                 }
9442         not_running:
9443                 if (!mddev->sync_thread) {
9444                         clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
9445                         wake_up(&resync_wait);
9446                         if (test_and_clear_bit(MD_RECOVERY_RECOVER,
9447                                                &mddev->recovery))
9448                                 if (mddev->sysfs_action)
9449                                         sysfs_notify_dirent_safe(mddev->sysfs_action);
9450                 }
9451         unlock:
9452                 wake_up(&mddev->sb_wait);
9453                 mddev_unlock(mddev);
9454         }
9455 }
9456 EXPORT_SYMBOL(md_check_recovery);
9457
9458 void md_reap_sync_thread(struct mddev *mddev)
9459 {
9460         struct md_rdev *rdev;
9461         sector_t old_dev_sectors = mddev->dev_sectors;
9462         bool is_reshaped = false;
9463
9464         /* resync has finished, collect result */
9465         md_unregister_thread(&mddev->sync_thread);
9466         if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
9467             !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
9468             mddev->degraded != mddev->raid_disks) {
9469                 /* success...*/
9470                 /* activate any spares */
9471                 if (mddev->pers->spare_active(mddev)) {
9472                         sysfs_notify_dirent_safe(mddev->sysfs_degraded);
9473                         set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
9474                 }
9475         }
9476         if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
9477             mddev->pers->finish_reshape) {
9478                 mddev->pers->finish_reshape(mddev);
9479                 if (mddev_is_clustered(mddev))
9480                         is_reshaped = true;
9481         }
9482
9483         /* If array is no-longer degraded, then any saved_raid_disk
9484          * information must be scrapped.
9485          */
9486         if (!mddev->degraded)
9487                 rdev_for_each(rdev, mddev)
9488                         rdev->saved_raid_disk = -1;
9489
9490         md_update_sb(mddev, 1);
9491         /* MD_SB_CHANGE_PENDING should be cleared by md_update_sb, so we can
9492          * call resync_finish here if MD_CLUSTER_RESYNC_LOCKED is set by
9493          * clustered raid */
9494         if (test_and_clear_bit(MD_CLUSTER_RESYNC_LOCKED, &mddev->flags))
9495                 md_cluster_ops->resync_finish(mddev);
9496         clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
9497         clear_bit(MD_RECOVERY_DONE, &mddev->recovery);
9498         clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
9499         clear_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
9500         clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
9501         clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
9502         /*
9503          * We call md_cluster_ops->update_size here because sync_size could
9504          * be changed by md_update_sb, and MD_RECOVERY_RESHAPE is cleared,
9505          * so it is time to update size across cluster.
9506          */
9507         if (mddev_is_clustered(mddev) && is_reshaped
9508                                       && !test_bit(MD_CLOSING, &mddev->flags))
9509                 md_cluster_ops->update_size(mddev, old_dev_sectors);
9510         wake_up(&resync_wait);
9511         /* flag recovery needed just to double check */
9512         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
9513         sysfs_notify_dirent_safe(mddev->sysfs_completed);
9514         sysfs_notify_dirent_safe(mddev->sysfs_action);
9515         md_new_event();
9516         if (mddev->event_work.func)
9517                 queue_work(md_misc_wq, &mddev->event_work);
9518 }
9519 EXPORT_SYMBOL(md_reap_sync_thread);
9520
9521 void md_wait_for_blocked_rdev(struct md_rdev *rdev, struct mddev *mddev)
9522 {
9523         sysfs_notify_dirent_safe(rdev->sysfs_state);
9524         wait_event_timeout(rdev->blocked_wait,
9525                            !test_bit(Blocked, &rdev->flags) &&
9526                            !test_bit(BlockedBadBlocks, &rdev->flags),
9527                            msecs_to_jiffies(5000));
9528         rdev_dec_pending(rdev, mddev);
9529 }
9530 EXPORT_SYMBOL(md_wait_for_blocked_rdev);
9531
9532 void md_finish_reshape(struct mddev *mddev)
9533 {
9534         /* called by the personality module when reshape completes. */
9535         struct md_rdev *rdev;
9536
9537         rdev_for_each(rdev, mddev) {
9538                 if (rdev->data_offset > rdev->new_data_offset)
9539                         rdev->sectors += rdev->data_offset - rdev->new_data_offset;
9540                 else
9541                         rdev->sectors -= rdev->new_data_offset - rdev->data_offset;
9542                 rdev->data_offset = rdev->new_data_offset;
9543         }
9544 }
9545 EXPORT_SYMBOL(md_finish_reshape);
9546
9547 /* Bad block management */
9548
9549 /* Returns 1 on success, 0 on failure */
9550 int rdev_set_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
9551                        int is_new)
9552 {
9553         struct mddev *mddev = rdev->mddev;
9554         int rv;
9555         if (is_new)
9556                 s += rdev->new_data_offset;
9557         else
9558                 s += rdev->data_offset;
9559         rv = badblocks_set(&rdev->badblocks, s, sectors, 0);
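             /* badblocks_set() returns 0 on success, hence the inverted return value below */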
9560         if (rv == 0) {
9561                 /* Make sure they get written out promptly */
9562                 if (test_bit(ExternalBbl, &rdev->flags))
9563                         sysfs_notify_dirent_safe(rdev->sysfs_unack_badblocks);
9564                 sysfs_notify_dirent_safe(rdev->sysfs_state);
9565                 set_mask_bits(&mddev->sb_flags, 0,
9566                               BIT(MD_SB_CHANGE_CLEAN) | BIT(MD_SB_CHANGE_PENDING));
9567                 md_wakeup_thread(rdev->mddev->thread);
9568                 return 1;
9569         } else
9570                 return 0;
9571 }
9572 EXPORT_SYMBOL_GPL(rdev_set_badblocks);
9573
9574 int rdev_clear_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
9575                          int is_new)
9576 {
9577         int rv;
9578         if (is_new)
9579                 s += rdev->new_data_offset;
9580         else
9581                 s += rdev->data_offset;
9582         rv = badblocks_clear(&rdev->badblocks, s, sectors);
9583         if ((rv == 0) && test_bit(ExternalBbl, &rdev->flags))
9584                 sysfs_notify_dirent_safe(rdev->sysfs_badblocks);
9585         return rv;
9586 }
9587 EXPORT_SYMBOL_GPL(rdev_clear_badblocks);
9588
9589 static int md_notify_reboot(struct notifier_block *this,
9590                             unsigned long code, void *x)
9591 {
9592         struct mddev *mddev, *n;
9593         int need_delay = 0;
9594
9595         spin_lock(&all_mddevs_lock);
9596         list_for_each_entry_safe(mddev, n, &all_mddevs, all_mddevs) {
9597                 if (!mddev_get(mddev))
9598                         continue;
9599                 spin_unlock(&all_mddevs_lock);
9600                 if (mddev_trylock(mddev)) {
9601                         if (mddev->pers)
9602                                 __md_stop_writes(mddev);
9603                         if (mddev->persistent)
9604                                 mddev->safemode = 2;
9605                         mddev_unlock(mddev);
9606                 }
9607                 need_delay = 1;
9608                 mddev_put(mddev);
9609                 spin_lock(&all_mddevs_lock);
9610         }
9611         spin_unlock(&all_mddevs_lock);
9612
9613         /*
9614          * certain more exotic SCSI devices are known to be
9615          * volatile wrt too early system reboots. While the
9616          * right place to handle this issue is the given
9617          * driver, we do want to have a safe RAID driver ...
9618          */
9619         if (need_delay)
9620                 msleep(1000);
9621
9622         return NOTIFY_DONE;
9623 }
9624
9625 static struct notifier_block md_notifier = {
9626         .notifier_call  = md_notify_reboot,
9627         .next           = NULL,
9628         .priority       = INT_MAX, /* before any real devices */
9629 };
9630
9631 static void md_geninit(void)
9632 {
9633         pr_debug("md: sizeof(mdp_super_t) = %d\n", (int)sizeof(mdp_super_t));
9634
9635         proc_create("mdstat", S_IRUGO, NULL, &mdstat_proc_ops);
9636 }
9637
9638 static int __init md_init(void)
9639 {
9640         int ret = -ENOMEM;
9641
9642         md_wq = alloc_workqueue("md", WQ_MEM_RECLAIM, 0);
9643         if (!md_wq)
9644                 goto err_wq;
9645
9646         md_misc_wq = alloc_workqueue("md_misc", 0, 0);
9647         if (!md_misc_wq)
9648                 goto err_misc_wq;
9649
9650         md_bitmap_wq = alloc_workqueue("md_bitmap", WQ_MEM_RECLAIM | WQ_UNBOUND,
9651                                        0);
9652         if (!md_bitmap_wq)
9653                 goto err_bitmap_wq;
9654
9655         ret = __register_blkdev(MD_MAJOR, "md", md_probe);
9656         if (ret < 0)
9657                 goto err_md;
9658
9659         ret = __register_blkdev(0, "mdp", md_probe);
9660         if (ret < 0)
9661                 goto err_mdp;
9662         mdp_major = ret;
9663
9664         register_reboot_notifier(&md_notifier);
9665         raid_table_header = register_sysctl("dev/raid", raid_table);
9666
9667         md_geninit();
9668         return 0;
9669
9670 err_mdp:
9671         unregister_blkdev(MD_MAJOR, "md");
9672 err_md:
9673         destroy_workqueue(md_bitmap_wq);
9674 err_bitmap_wq:
9675         destroy_workqueue(md_misc_wq);
9676 err_misc_wq:
9677         destroy_workqueue(md_wq);
9678 err_wq:
9679         return ret;
9680 }
9681
9682 static void check_sb_changes(struct mddev *mddev, struct md_rdev *rdev)
9683 {
9684         struct mdp_superblock_1 *sb = page_address(rdev->sb_page);
9685         struct md_rdev *rdev2, *tmp;
9686         int role, ret;
9687
9688         /*
9689          * If the size was changed on another node then we need to
9690          * resize here as well.
9691          */
9692         if (mddev->dev_sectors != le64_to_cpu(sb->size)) {
9693                 ret = mddev->pers->resize(mddev, le64_to_cpu(sb->size));
9694                 if (ret)
9695                         pr_info("md-cluster: resize failed\n");
9696                 else
9697                         md_bitmap_update_sb(mddev->bitmap);
9698         }
9699
9700         /* Check for change of roles in the active devices */
9701         rdev_for_each_safe(rdev2, tmp, mddev) {
9702                 if (test_bit(Faulty, &rdev2->flags))
9703                         continue;
9704
9705                 /* Check if the roles changed */
9706                 role = le16_to_cpu(sb->dev_roles[rdev2->desc_nr]);
9707
9708                 if (test_bit(Candidate, &rdev2->flags)) {
9709                         if (role == MD_DISK_ROLE_FAULTY) {
9710                                 pr_info("md: Removing Candidate device %pg because add failed\n",
9711                                         rdev2->bdev);
9712                                 md_kick_rdev_from_array(rdev2);
9713                                 continue;
9714                         }
9715                         else
9716                                 clear_bit(Candidate, &rdev2->flags);
9717                 }
9718
9719                 if (role != rdev2->raid_disk) {
9720                         /* The device got activated on another node;
9721                          * activate it here too, unless a reshape is in progress.
9722                          */
9723                         if (rdev2->raid_disk == -1 && role != MD_DISK_ROLE_SPARE &&
9724                             !(le32_to_cpu(sb->feature_map) &
9725                               MD_FEATURE_RESHAPE_ACTIVE)) {
9726                                 rdev2->saved_raid_disk = role;
9727                                 ret = remove_and_add_spares(mddev, rdev2);
9728                                 pr_info("Activated spare: %pg\n",
9729                                         rdev2->bdev);
9730                                 /* Wake up mddev->thread here so the array can
9731                                  * perform a resync with the newly activated disk. */
9732                                 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
9733                                 md_wakeup_thread(mddev->thread);
9734                         }
9735                         /* Device is faulty.
9736                          * Do only the minimum needed here to mark the disk
9737                          * as faulty; the recovery is performed by the
9738                          * node that initiated the error.
9739                          */
9740                         if (role == MD_DISK_ROLE_FAULTY ||
9741                             role == MD_DISK_ROLE_JOURNAL) {
9742                                 md_error(mddev, rdev2);
9743                                 clear_bit(Blocked, &rdev2->flags);
9744                         }
9745                 }
9746         }
9747
9748         if (mddev->raid_disks != le32_to_cpu(sb->raid_disks)) {
9749                 ret = update_raid_disks(mddev, le32_to_cpu(sb->raid_disks));
9750                 if (ret)
9751                         pr_warn("md: updating array disks failed. %d\n", ret);
9752         }
9753
9754         /*
9755          * mddev->delta_disks has already been updated in update_raid_disks(),
9756          * so now it is time to check for a reshape.
9757          */
9758         if (test_bit(MD_RESYNCING_REMOTE, &mddev->recovery) &&
9759             (le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE)) {
9760                 /*
9761                  * reshape is happening in the remote node, we need to
9762                  * update reshape_position and call start_reshape.
9763                  */
9764                 mddev->reshape_position = le64_to_cpu(sb->reshape_position);
9765                 if (mddev->pers->update_reshape_pos)
9766                         mddev->pers->update_reshape_pos(mddev);
9767                 if (mddev->pers->start_reshape)
9768                         mddev->pers->start_reshape(mddev);
9769         } else if (test_bit(MD_RESYNCING_REMOTE, &mddev->recovery) &&
9770                    mddev->reshape_position != MaxSector &&
9771                    !(le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE)) {
9772                 /* The reshape has just finished on another node. */
9773                 mddev->reshape_position = MaxSector;
9774                 if (mddev->pers->update_reshape_pos)
9775                         mddev->pers->update_reshape_pos(mddev);
9776         }
9777
9778         /* Finally set the event to be up to date */
9779         mddev->events = le64_to_cpu(sb->events);
9780 }
9781
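     /*
      * Re-read the superblock of @rdev from disk into a freshly allocated
      * page.  On failure the old page is restored so the in-memory state
      * stays valid and the error is returned; on success the recovery
      * offset and In_sync state are refreshed from the new superblock.
      */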
9782 static int read_rdev(struct mddev *mddev, struct md_rdev *rdev)
9783 {
9784         int err;
9785         struct page *swapout = rdev->sb_page;
9786         struct mdp_superblock_1 *sb;
9787
9788         /* Stash the rdev's current sb page in the 'swapout' temporary
9789          * so it can be restored if reloading the superblock fails.
9790          */
9791         rdev->sb_page = NULL;
9792         err = alloc_disk_sb(rdev);
9793         if (err == 0) {
9794                 ClearPageUptodate(rdev->sb_page);
9795                 rdev->sb_loaded = 0;
9796                 err = super_types[mddev->major_version].
9797                         load_super(rdev, NULL, mddev->minor_version);
9798         }
9799         if (err < 0) {
9800                 pr_warn("%s: %d Could not reload rdev(%d) err: %d. Restoring old values\n",
9801                                 __func__, __LINE__, rdev->desc_nr, err);
9802                 if (rdev->sb_page)
9803                         put_page(rdev->sb_page);
9804                 rdev->sb_page = swapout;
9805                 rdev->sb_loaded = 1;
9806                 return err;
9807         }
9808
9809         sb = page_address(rdev->sb_page);
9810         /* Pick up the recovery offset if MD_FEATURE_RECOVERY_OFFSET
9811          * is set in the superblock.
9812          */
9813
9814         if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RECOVERY_OFFSET))
9815                 rdev->recovery_offset = le64_to_cpu(sb->recovery_offset);
9816
9817         /* The other node finished recovery; call spare_active() to mark
9818          * the device In_sync and update mddev->degraded.
9819          */
9820         if (rdev->recovery_offset == MaxSector &&
9821             !test_bit(In_sync, &rdev->flags) &&
9822             mddev->pers->spare_active(mddev))
9823                 sysfs_notify_dirent_safe(mddev->sysfs_degraded);
9824
9825         put_page(swapout);
9826         return 0;
9827 }
9828
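     /*
      * Reload the superblock of the rdev with descriptor number @nr and
      * apply any changes found.  This is exported for md-cluster, which is
      * expected to call it when another node announces a metadata update;
      * afterwards every non-faulty rdev is re-read to refresh its
      * recovery_offset.
      */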
9829 void md_reload_sb(struct mddev *mddev, int nr)
9830 {
9831         struct md_rdev *rdev = NULL, *iter;
9832         int err;
9833
9834         /* Find the rdev */
9835         rdev_for_each_rcu(iter, mddev) {
9836                 if (iter->desc_nr == nr) {
9837                         rdev = iter;
9838                         break;
9839                 }
9840         }
9841
9842         if (!rdev) {
9843                 pr_warn("%s: %d Could not find rdev with nr %d\n", __func__, __LINE__, nr);
9844                 return;
9845         }
9846
9847         err = read_rdev(mddev, rdev);
9848         if (err < 0)
9849                 return;
9850
9851         check_sb_changes(mddev, rdev);
9852
9853         /* Read all rdev's to update recovery_offset */
9854         rdev_for_each_rcu(rdev, mddev) {
9855                 if (!test_bit(Faulty, &rdev->flags))
9856                         read_rdev(mddev, rdev);
9857         }
9858 }
9859 EXPORT_SYMBOL(md_reload_sb);
9860
9861 #ifndef MODULE
9862
9863 /*
9864  * Searches all registered partitions for autorun RAID arrays
9865  * at boot time.
9866  */
9867
9868 static DEFINE_MUTEX(detected_devices_mutex);
9869 static LIST_HEAD(all_detected_devices);
9870 struct detected_devices_node {
9871         struct list_head list;
9872         dev_t dev;
9873 };
9874
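     /*
      * Remember a device for RAID autodetection.  This is typically called
      * during boot for partitions marked as "Linux raid autodetect"; the
      * device is queued on all_detected_devices and picked up later by
      * md_autostart_arrays().
      */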
9875 void md_autodetect_dev(dev_t dev)
9876 {
9877         struct detected_devices_node *node_detected_dev;
9878
9879         node_detected_dev = kzalloc(sizeof(*node_detected_dev), GFP_KERNEL);
9880         if (node_detected_dev) {
9881                 node_detected_dev->dev = dev;
9882                 mutex_lock(&detected_devices_mutex);
9883                 list_add_tail(&node_detected_dev->list, &all_detected_devices);
9884                 mutex_unlock(&detected_devices_mutex);
9885         }
9886 }
9887
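     /*
      * Drain all_detected_devices: import each queued device (dropping the
      * mutex around md_import_device()), add the healthy ones to
      * pending_raid_disks and let autorun_devices() assemble and start the
      * arrays.
      */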
9888 void md_autostart_arrays(int part)
9889 {
9890         struct md_rdev *rdev;
9891         struct detected_devices_node *node_detected_dev;
9892         dev_t dev;
9893         int i_scanned, i_passed;
9894
9895         i_scanned = 0;
9896         i_passed = 0;
9897
9898         pr_info("md: Autodetecting RAID arrays.\n");
9899
9900         mutex_lock(&detected_devices_mutex);
9901         while (!list_empty(&all_detected_devices) && i_scanned < INT_MAX) {
9902                 i_scanned++;
9903                 node_detected_dev = list_entry(all_detected_devices.next,
9904                                         struct detected_devices_node, list);
9905                 list_del(&node_detected_dev->list);
9906                 dev = node_detected_dev->dev;
9907                 kfree(node_detected_dev);
9908                 mutex_unlock(&detected_devices_mutex);
9909                 rdev = md_import_device(dev, 0, 90);
9910                 mutex_lock(&detected_devices_mutex);
9911                 if (IS_ERR(rdev))
9912                         continue;
9913
9914                 if (test_bit(Faulty, &rdev->flags))
9915                         continue;
9916
9917                 set_bit(AutoDetected, &rdev->flags);
9918                 list_add(&rdev->same_set, &pending_raid_disks);
9919                 i_passed++;
9920         }
9921         mutex_unlock(&detected_devices_mutex);
9922
9923         pr_debug("md: Scanned %d and added %d devices.\n", i_scanned, i_passed);
9924
9925         autorun_devices(part);
9926 }
9927
9928 #endif /* !MODULE */
9929
9930 static __exit void md_exit(void)
9931 {
9932         struct mddev *mddev, *n;
9933         int delay = 1;
9934
9935         unregister_blkdev(MD_MAJOR, "md");
9936         unregister_blkdev(mdp_major, "mdp");
9937         unregister_reboot_notifier(&md_notifier);
9938         unregister_sysctl_table(raid_table_header);
9939
9940         /* We cannot unload the module while some process is
9941          * waiting for us in select() or poll(); keep waking them up.
9942          */
9943         md_unloading = 1;
9944         while (waitqueue_active(&md_event_waiters)) {
9945                 /* not safe to leave yet */
9946                 wake_up(&md_event_waiters);
9947                 msleep(delay);
9948                 delay += delay;
9949         }
9950         remove_proc_entry("mdstat", NULL);
9951
9952         spin_lock(&all_mddevs_lock);
9953         list_for_each_entry_safe(mddev, n, &all_mddevs, all_mddevs) {
9954                 if (!mddev_get(mddev))
9955                         continue;
9956                 spin_unlock(&all_mddevs_lock);
9957                 export_array(mddev);
9958                 mddev->ctime = 0;
9959                 mddev->hold_active = 0;
9960                 /*
9961                  * As the mddev is now fully clear, mddev_put will schedule
9962                  * the mddev for destruction by a workqueue, and the
9963                  * destroy_workqueue() below will wait for that to complete.
9964                  */
9965                 mddev_put(mddev);
9966                 spin_lock(&all_mddevs_lock);
9967         }
9968         spin_unlock(&all_mddevs_lock);
9969
9970         destroy_workqueue(md_misc_wq);
9971         destroy_workqueue(md_bitmap_wq);
9972         destroy_workqueue(md_wq);
9973 }
9974
9975 subsys_initcall(md_init);
9976 module_exit(md_exit)
9977
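     /*
      * Handlers for the "start_ro" module parameter.  When set, newly
      * started arrays are expected to come up in auto-read-only mode and
      * switch to read-write on the first write.
      */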
9978 static int get_ro(char *buffer, const struct kernel_param *kp)
9979 {
9980         return sprintf(buffer, "%d\n", start_readonly);
9981 }
9982 static int set_ro(const char *val, const struct kernel_param *kp)
9983 {
9984         return kstrtouint(val, 10, (unsigned int *)&start_readonly);
9985 }
9986
9987 module_param_call(start_ro, set_ro, get_ro, NULL, S_IRUSR|S_IWUSR);
9988 module_param(start_dirty_degraded, int, S_IRUGO|S_IWUSR);
9989 module_param_call(new_array, add_named_array, NULL, NULL, S_IWUSR);
9990 module_param(create_on_open, bool, S_IRUSR|S_IWUSR);
9991
9992 MODULE_LICENSE("GPL");
9993 MODULE_DESCRIPTION("MD RAID framework");
9994 MODULE_ALIAS("md");
9995 MODULE_ALIAS_BLOCKDEV_MAJOR(MD_MAJOR);