Merge tag 'md-3.7' of git://neil.brown.name/md
author    Linus Torvalds <torvalds@linux-foundation.org>
          Sat, 13 Oct 2012 20:22:01 +0000 (13:22 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
          Sat, 13 Oct 2012 20:22:01 +0000 (13:22 -0700)
Pull md updates from NeilBrown:
 - "discard" support, some dm-raid improvements and other assorted bits
   and pieces.

* tag 'md-3.7' of git://neil.brown.name/md: (29 commits)
  md: refine reporting of resync/reshape delays.
  md/raid5: be careful not to resize_stripes too big.
  md: make sure manual changes to recovery checkpoint are saved.
  md/raid10: use correct limit variable
  md: writing to sync_action should clear the read-auto state.
  md: change resync_mismatches to atomic64_t to avoid races
  md/raid5: make sure to_read and to_write never go negative.
  md: When RAID5 is dirty, force reconstruct-write instead of read-modify-write.
  md/raid5: protect debug message against NULL dereference.
  md/raid5: add some missing locking in handle_failed_stripe.
  MD: raid5 avoid unnecessary zero page for trim
  MD: raid5 trim support
  md/bitmap: Don't use IS_ERR to judge alloc_page().
  md/raid1: Don't release reference to device while handling read error.
  raid: replace list_for_each_continue_rcu with new interface
  add further __init annotations to crypto/xor.c
  DM RAID: Fix for "sync" directive ineffectiveness
  DM RAID: Fix comparison of index and quantity for "rebuild" parameter
  DM RAID: Add rebuild capability for RAID10
  DM RAID: Move 'rebuild' checking code to its own function
  ...

drivers/md/md.c
drivers/md/raid10.c
drivers/md/raid5.c
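
A note on the resync_mismatches conversion threaded through all three
files below: the old code did "mddev->resync_mismatches += sectors"
from personality threads while sysfs readers loaded the value, a racy
read-modify-write on a plain counter. The series switches the field to
atomic64_t. A minimal userspace model of the pattern, using C11 atomics
in place of the kernel atomic64 API (names here are illustrative, not
kernel code):

    #include <stdatomic.h>
    #include <stdint.h>

    /* "counter += n" is a load/add/store sequence, so two concurrent
     * updaters can lose increments; an atomic fetch-add (atomic64_add
     * in the kernel) cannot. */
    static _Atomic uint64_t resync_mismatches;

    static void note_mismatches(uint64_t sectors)
    {
            atomic_fetch_add_explicit(&resync_mismatches, sectors,
                                      memory_order_relaxed);
    }

    /* cf. mismatch_cnt_show(): a plain atomic load replaces the
     * unlocked read of the old field. */
    static uint64_t read_mismatches(void)
    {
            return atomic_load_explicit(&resync_mismatches,
                                        memory_order_relaxed);
    }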

diff --combined drivers/md/md.c
@@@ -674,7 -674,18 +674,18 @@@ static struct md_rdev * find_rdev_nr(st
        return NULL;
  }
  
- static struct md_rdev * find_rdev(struct mddev * mddev, dev_t dev)
+ static struct md_rdev *find_rdev_nr_rcu(struct mddev *mddev, int nr)
+ {
+       struct md_rdev *rdev;
+       rdev_for_each_rcu(rdev, mddev)
+               if (rdev->desc_nr == nr)
+                       return rdev;
+       return NULL;
+ }
+ static struct md_rdev *find_rdev(struct mddev *mddev, dev_t dev)
  {
        struct md_rdev *rdev;
  
        return NULL;
  }
  
+ static struct md_rdev *find_rdev_rcu(struct mddev *mddev, dev_t dev)
+ {
+       struct md_rdev *rdev;
+       rdev_for_each_rcu(rdev, mddev)
+               if (rdev->bdev->bd_dev == dev)
+                       return rdev;
+       return NULL;
+ }
  static struct md_personality *find_pers(int level, char *clevel)
  {
        struct md_personality *pers;
@@@ -2022,8 -2044,14 +2044,14 @@@ EXPORT_SYMBOL(md_integrity_register)
  /* Disable data integrity if non-capable/non-matching disk is being added */
  void md_integrity_add_rdev(struct md_rdev *rdev, struct mddev *mddev)
  {
-       struct blk_integrity *bi_rdev = bdev_get_integrity(rdev->bdev);
-       struct blk_integrity *bi_mddev = blk_get_integrity(mddev->gendisk);
+       struct blk_integrity *bi_rdev;
+       struct blk_integrity *bi_mddev;
+       if (!mddev->gendisk)
+               return;
+       bi_rdev = bdev_get_integrity(rdev->bdev);
+       bi_mddev = blk_get_integrity(mddev->gendisk);
  
        if (!bi_mddev) /* nothing to do */
                return;
@@@ -3754,6 -3782,8 +3782,8 @@@ resync_start_store(struct mddev *mddev
                return -EINVAL;
  
        mddev->recovery_cp = n;
+       if (mddev->pers)
+               set_bit(MD_CHANGE_CLEAN, &mddev->flags);
        return len;
  }
  static struct md_sysfs_entry md_resync_start =
@@@ -4231,6 -4261,13 +4261,13 @@@ action_store(struct mddev *mddev, cons
                set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
                set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
        }
+       if (mddev->ro == 2) {
+               /* A write to sync_action is enough to justify
+                * canceling read-auto mode
+                */
+               mddev->ro = 0;
+               md_wakeup_thread(mddev->sync_thread);
+       }
        set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
        md_wakeup_thread(mddev->thread);
        sysfs_notify_dirent_safe(mddev->sysfs_action);
@@@ -4241,7 -4278,8 +4278,8 @@@ static ssize_
  mismatch_cnt_show(struct mddev *mddev, char *page)
  {
        return sprintf(page, "%llu\n",
-                      (unsigned long long) mddev->resync_mismatches);
+                      (unsigned long long)
+                      atomic64_read(&mddev->resync_mismatches));
  }
  
  static struct md_sysfs_entry md_scan_mode =
@@@ -4362,6 -4400,10 +4400,10 @@@ sync_completed_show(struct mddev *mddev
        if (!test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
                return sprintf(page, "none\n");
  
+       if (mddev->curr_resync == 1 ||
+           mddev->curr_resync == 2)
+               return sprintf(page, "delayed\n");
        if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ||
            test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
                max_sectors = mddev->resync_max_sectors;
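
(A note on the small-integer states tested here and in status_resync()
and md_do_sync() below: as of this series, curr_resync == 1 or 2 means
the resync is delayed behind another array sharing the same devices,
3 means it has started but made no progress yet, and anything larger is
a real sector position; hence the comparisons against 1/2/3 rather than
against 0.)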
@@@ -5207,7 -5249,7 +5249,7 @@@ static void md_clean(struct mddev *mdde
        mddev->new_layout = 0;
        mddev->new_chunk_sectors = 0;
        mddev->curr_resync = 0;
-       mddev->resync_mismatches = 0;
+       atomic64_set(&mddev->resync_mismatches, 0);
        mddev->suspend_lo = mddev->suspend_hi = 0;
        mddev->sync_speed_min = mddev->sync_speed_max = 0;
        mddev->recovery = 0;
@@@ -5509,8 -5551,9 +5551,9 @@@ static int get_array_info(struct mddev 
        int nr,working,insync,failed,spare;
        struct md_rdev *rdev;
  
-       nr=working=insync=failed=spare=0;
-       rdev_for_each(rdev, mddev) {
+       nr = working = insync = failed = spare = 0;
+       rcu_read_lock();
+       rdev_for_each_rcu(rdev, mddev) {
                nr++;
                if (test_bit(Faulty, &rdev->flags))
                        failed++;
                                spare++;
                }
        }
+       rcu_read_unlock();
  
        info.major_version = mddev->major_version;
        info.minor_version = mddev->minor_version;
@@@ -5605,7 -5649,8 +5649,8 @@@ static int get_disk_info(struct mddev 
        if (copy_from_user(&info, arg, sizeof(info)))
                return -EFAULT;
  
-       rdev = find_rdev_nr(mddev, info.number);
+       rcu_read_lock();
+       rdev = find_rdev_nr_rcu(mddev, info.number);
        if (rdev) {
                info.major = MAJOR(rdev->bdev->bd_dev);
                info.minor = MINOR(rdev->bdev->bd_dev);
                info.raid_disk = -1;
                info.state = (1<<MD_DISK_REMOVED);
        }
+       rcu_read_unlock();
  
        if (copy_to_user(arg, &info, sizeof(info)))
                return -EFAULT;
@@@ -6232,18 -6278,22 +6278,22 @@@ static int update_array_info(struct mdd
  static int set_disk_faulty(struct mddev *mddev, dev_t dev)
  {
        struct md_rdev *rdev;
+       int err = 0;
  
        if (mddev->pers == NULL)
                return -ENODEV;
  
-       rdev = find_rdev(mddev, dev);
+       rcu_read_lock();
+       rdev = find_rdev_rcu(mddev, dev);
        if (!rdev)
-               return -ENODEV;
-       md_error(mddev, rdev);
-       if (!test_bit(Faulty, &rdev->flags))
-               return -EBUSY;
-       return 0;
+               err = -ENODEV;
+       else {
+               md_error(mddev, rdev);
+               if (!test_bit(Faulty, &rdev->flags))
+                       err = -EBUSY;
+       }
+       rcu_read_unlock();
+       return err;
  }
  
  /*
@@@ -6315,6 -6365,27 +6365,27 @@@ static int md_ioctl(struct block_devic
                goto abort;
        }
  
+       /* Some actions do not require the mutex */
+       switch (cmd) {
+       case GET_ARRAY_INFO:
+               if (!mddev->raid_disks && !mddev->external)
+                       err = -ENODEV;
+               else
+                       err = get_array_info(mddev, argp);
+               goto abort;
+       case GET_DISK_INFO:
+               if (!mddev->raid_disks && !mddev->external)
+                       err = -ENODEV;
+               else
+                       err = get_disk_info(mddev, argp);
+               goto abort;
+       case SET_DISK_FAULTY:
+               err = set_disk_faulty(mddev, new_decode_dev(arg));
+               goto abort;
+       }
        err = mddev_lock(mddev);
        if (err) {
                printk(KERN_INFO 
         */
        switch (cmd)
        {
-               case GET_ARRAY_INFO:
-                       err = get_array_info(mddev, argp);
-                       goto done_unlock;
                case GET_BITMAP_FILE:
                        err = get_bitmap_file(mddev, argp);
                        goto done_unlock;
  
-               case GET_DISK_INFO:
-                       err = get_disk_info(mddev, argp);
-                       goto done_unlock;
                case RESTART_ARRAY_RW:
                        err = restart_array(mddev);
                        goto done_unlock;
                        err = hot_add_disk(mddev, new_decode_dev(arg));
                        goto done_unlock;
  
-               case SET_DISK_FAULTY:
-                       err = set_disk_faulty(mddev, new_decode_dev(arg));
-                       goto done_unlock;
                case RUN_ARRAY:
                        err = do_md_run(mddev);
                        goto done_unlock;
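
The three cases hoisted above (GET_ARRAY_INFO, GET_DISK_INFO,
SET_DISK_FAULTY) now run before mddev_lock() is taken, relying on the
RCU helpers added at the top of this diff. The read-side contract is
the usual RCU one; a sketch of the pattern (do_something() is a
placeholder, not a kernel function):

    rcu_read_lock();
    rdev = find_rdev_rcu(mddev, dev);
    if (rdev)
            do_something(rdev);   /* all uses stay inside the
                                   * read-side critical section */
    rcu_read_unlock();
    /* rdev may be freed from here on; it must not be touched */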
@@@ -6641,7 -6700,7 +6700,7 @@@ static int md_thread(void * arg
  
                clear_bit(THREAD_WAKEUP, &thread->flags);
                if (!kthread_should_stop())
-                       thread->run(thread->mddev);
+                       thread->run(thread);
        }
  
        return 0;
@@@ -6656,8 -6715,8 +6715,8 @@@ void md_wakeup_thread(struct md_thread 
        }
  }
  
- struct md_thread *md_register_thread(void (*run) (struct mddev *), struct mddev *mddev,
-                                const char *name)
+ struct md_thread *md_register_thread(void (*run) (struct md_thread *),
+               struct mddev *mddev, const char *name)
  {
        struct md_thread *thread;
  
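
(The thread callback signature changes here from void (*run)(struct
mddev *) to void (*run)(struct md_thread *); md_do_sync(), raid10d()
and raid5d() below are converted to fetch the array via thread->mddev,
so the callback can carry per-thread state beyond the bare mddev.)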
@@@ -6752,7 -6811,11 +6811,11 @@@ static void status_resync(struct seq_fi
        int scale;
        unsigned int per_milli;
  
-       resync = mddev->curr_resync - atomic_read(&mddev->recovery_active);
+       if (mddev->curr_resync <= 3)
+               resync = 0;
+       else
+               resync = mddev->curr_resync
+                       - atomic_read(&mddev->recovery_active);
  
        if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ||
            test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
@@@ -6978,7 -7041,7 +7041,7 @@@ static int md_seq_show(struct seq_file 
                                if (mddev->curr_resync > 2) {
                                        status_resync(seq, mddev);
                                        seq_printf(seq, "\n      ");
-                               } else if (mddev->curr_resync == 1 || mddev->curr_resync == 2)
+                               } else if (mddev->curr_resync >= 1)
                                        seq_printf(seq, "\tresync=DELAYED\n      ");
                                else if (mddev->recovery_cp < MaxSector)
                                        seq_printf(seq, "\tresync=PENDING\n      ");
@@@ -7206,8 -7269,9 +7269,9 @@@ EXPORT_SYMBOL_GPL(md_allow_write)
  
  #define SYNC_MARKS    10
  #define       SYNC_MARK_STEP  (3*HZ)
- void md_do_sync(struct mddev *mddev)
+ void md_do_sync(struct md_thread *thread)
  {
+       struct mddev *mddev = thread->mddev;
        struct mddev *mddev2;
        unsigned int currspeed = 0,
                 window;
                 * which defaults to physical size, but can be virtual size
                 */
                max_sectors = mddev->resync_max_sectors;
-               mddev->resync_mismatches = 0;
+               atomic64_set(&mddev->resync_mismatches, 0);
                /* we don't use the checkpoint if there's a bitmap */
                if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
                        j = mddev->resync_min;
                       "md: resuming %s of %s from checkpoint.\n",
                       desc, mdname(mddev));
                mddev->curr_resync = j;
-       }
+       } else
+               mddev->curr_resync = 3; /* no longer delayed */
        mddev->curr_resync_completed = j;
+       sysfs_notify(&mddev->kobj, NULL, "sync_completed");
+       md_new_event(mddev);
  
        blk_start_plug(&plug);
        while (j < max_sectors) {
                        break;
  
                j += sectors;
-               if (j>1) mddev->curr_resync = j;
+               if (j > 2)
+                       mddev->curr_resync = j;
                mddev->curr_mark_cnt = io_sectors;
                if (last_check == 0)
                        /* this is the earliest that rebuild will be
@@@ -7543,8 -7611,6 +7611,6 @@@ static int remove_and_add_spares(struc
        int spares = 0;
        int removed = 0;
  
-       mddev->curr_resync_completed = 0;
        rdev_for_each(rdev, mddev)
                if (rdev->raid_disk >= 0 &&
                    !test_bit(Blocked, &rdev->flags) &&
                        }
                }
        }
 +      if (removed)
 +              set_bit(MD_CHANGE_DEVS, &mddev->flags);
        return spares;
  }
  
@@@ -7596,11 -7660,9 +7662,11 @@@ static void reap_sync_thread(struct mdd
            !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
                /* success...*/
                /* activate any spares */
 -              if (mddev->pers->spare_active(mddev))
 +              if (mddev->pers->spare_active(mddev)) {
                        sysfs_notify(&mddev->kobj, NULL,
                                     "degraded");
 +                      set_bit(MD_CHANGE_DEVS, &mddev->flags);
 +              }
        }
        if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
            mddev->pers->finish_reshape)
@@@ -7739,6 -7801,7 +7805,7 @@@ void md_check_recovery(struct mddev *md
                /* Set RUNNING before clearing NEEDED to avoid
                 * any transients in the value of "sync_action".
                 */
+               mddev->curr_resync_completed = 0;
                set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
                /* Clear some bits that don't mean anything, but
                 * might be left set
                /* no recovery is running.
                 * remove any failed drives, then
                 * add spares if possible.
-                * Spare are also removed and re-added, to allow
+                * Spares are also removed and re-added, to allow
                 * the personality to fail the re-add.
                 */
  
diff --combined drivers/md/raid10.c
@@@ -911,7 -911,12 +911,12 @@@ static void flush_pending_writes(struc
                while (bio) { /* submit pending writes */
                        struct bio *next = bio->bi_next;
                        bio->bi_next = NULL;
-                       generic_make_request(bio);
+                       if (unlikely((bio->bi_rw & REQ_DISCARD) &&
+                           !blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
+                               /* Just ignore it */
+                               bio_endio(bio, 0);
+                       else
+                               generic_make_request(bio);
                        bio = next;
                }
        } else
@@@ -1050,6 -1055,44 +1055,44 @@@ static sector_t choose_data_offset(stru
                return rdev->new_data_offset;
  }
  
+ struct raid10_plug_cb {
+       struct blk_plug_cb      cb;
+       struct bio_list         pending;
+       int                     pending_cnt;
+ };
+ static void raid10_unplug(struct blk_plug_cb *cb, bool from_schedule)
+ {
+       struct raid10_plug_cb *plug = container_of(cb, struct raid10_plug_cb,
+                                                  cb);
+       struct mddev *mddev = plug->cb.data;
+       struct r10conf *conf = mddev->private;
+       struct bio *bio;
+       if (from_schedule) {
+               spin_lock_irq(&conf->device_lock);
+               bio_list_merge(&conf->pending_bio_list, &plug->pending);
+               conf->pending_count += plug->pending_cnt;
+               spin_unlock_irq(&conf->device_lock);
+               md_wakeup_thread(mddev->thread);
+               kfree(plug);
+               return;
+       }
+       /* we aren't scheduling, so we can do the write-out directly. */
+       bio = bio_list_get(&plug->pending);
+       bitmap_unplug(mddev->bitmap);
+       wake_up(&conf->wait_barrier);
+       while (bio) { /* submit pending writes */
+               struct bio *next = bio->bi_next;
+               bio->bi_next = NULL;
+               generic_make_request(bio);
+               bio = next;
+       }
+       kfree(plug);
+ }
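
(Design note on from_schedule: a blk_plug callback can be invoked from
the scheduler when the plugging task is about to sleep, a context where
issuing I/O directly is unsafe, so the locally queued bios are spliced
onto conf->pending_bio_list and the md thread is woken to do the
write-out; only the ordinary blk_finish_plug() path submits inline.)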
  static void make_request(struct mddev *mddev, struct bio * bio)
  {
        struct r10conf *conf = mddev->private;
        const int rw = bio_data_dir(bio);
        const unsigned long do_sync = (bio->bi_rw & REQ_SYNC);
        const unsigned long do_fua = (bio->bi_rw & REQ_FUA);
+       const unsigned long do_discard = (bio->bi_rw
+                                         & (REQ_DISCARD | REQ_SECURE));
        unsigned long flags;
        struct md_rdev *blocked_rdev;
+       struct blk_plug_cb *cb;
+       struct raid10_plug_cb *plug = NULL;
        int sectors_handled;
        int max_sectors;
        int sectors;
                         || conf->prev.near_copies < conf->prev.raid_disks))) {
                struct bio_pair *bp;
                /* Sanity check -- queue functions should prevent this happening */
-               if (bio->bi_vcnt != 1 ||
+               if ((bio->bi_vcnt != 1 && bio->bi_vcnt != 0) ||
                    bio->bi_idx != 0)
                        goto bad_map;
                /* This is a one page bio that upper layers
@@@ -1410,15 -1457,26 +1457,26 @@@ retry_write
                                                      conf->mirrors[d].rdev));
                mbio->bi_bdev = conf->mirrors[d].rdev->bdev;
                mbio->bi_end_io = raid10_end_write_request;
-               mbio->bi_rw = WRITE | do_sync | do_fua;
+               mbio->bi_rw = WRITE | do_sync | do_fua | do_discard;
                mbio->bi_private = r10_bio;
  
                atomic_inc(&r10_bio->remaining);
+               cb = blk_check_plugged(raid10_unplug, mddev, sizeof(*plug));
+               if (cb)
+                       plug = container_of(cb, struct raid10_plug_cb, cb);
+               else
+                       plug = NULL;
                spin_lock_irqsave(&conf->device_lock, flags);
-               bio_list_add(&conf->pending_bio_list, mbio);
-               conf->pending_count++;
+               if (plug) {
+                       bio_list_add(&plug->pending, mbio);
+                       plug->pending_cnt++;
+               } else {
+                       bio_list_add(&conf->pending_bio_list, mbio);
+                       conf->pending_count++;
+               }
                spin_unlock_irqrestore(&conf->device_lock, flags);
-               if (!mddev_check_plugged(mddev))
+               if (!plug)
                        md_wakeup_thread(mddev->thread);
  
                if (!r10_bio->devs[i].repl_bio)
                                           conf->mirrors[d].replacement));
                mbio->bi_bdev = conf->mirrors[d].replacement->bdev;
                mbio->bi_end_io = raid10_end_write_request;
-               mbio->bi_rw = WRITE | do_sync | do_fua;
+               mbio->bi_rw = WRITE | do_sync | do_fua | do_discard;
                mbio->bi_private = r10_bio;
  
                atomic_inc(&r10_bio->remaining);
@@@ -1512,16 -1570,14 +1570,16 @@@ static int _enough(struct r10conf *conf
        do {
                int n = conf->copies;
                int cnt = 0;
 +              int this = first;
                while (n--) {
 -                      if (conf->mirrors[first].rdev &&
 -                          first != ignore)
 +                      if (conf->mirrors[this].rdev &&
 +                          this != ignore)
                                cnt++;
 -                      first = (first+1) % geo->raid_disks;
 +                      this = (this+1) % geo->raid_disks;
                }
                if (cnt == 0)
                        return 0;
 +              first = (first + geo->near_copies) % geo->raid_disks;
        } while (first != 0);
        return 1;
  }
@@@ -1638,7 -1694,7 +1696,7 @@@ static int raid10_spare_active(struct m
                           && !test_bit(Faulty, &tmp->rdev->flags)
                           && !test_and_set_bit(In_sync, &tmp->rdev->flags)) {
                        count++;
-                       sysfs_notify_dirent(tmp->rdev->sysfs_state);
+                       sysfs_notify_dirent_safe(tmp->rdev->sysfs_state);
                }
        }
        spin_lock_irqsave(&conf->device_lock, flags);
@@@ -1725,6 -1781,9 +1783,9 @@@ static int raid10_add_disk(struct mdde
                clear_bit(Unmerged, &rdev->flags);
        }
        md_integrity_add_rdev(rdev, mddev);
+       if (blk_queue_discard(bdev_get_queue(rdev->bdev)))
+               queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
        print_conf(conf);
        return err;
  }
@@@ -1952,7 -2011,7 +2013,7 @@@ static void sync_request_write(struct m
                                        break;
                        if (j == vcnt)
                                continue;
-                       mddev->resync_mismatches += r10_bio->sectors;
+                       atomic64_add(r10_bio->sectors, &mddev->resync_mismatches);
                        if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery))
                                /* Don't fix anything. */
                                continue;
@@@ -2673,8 -2732,9 +2734,9 @@@ static void handle_write_completed(stru
        }
  }
  
- static void raid10d(struct mddev *mddev)
+ static void raid10d(struct md_thread *thread)
  {
+       struct mddev *mddev = thread->mddev;
        struct r10bio *r10_bio;
        unsigned long flags;
        struct r10conf *conf = mddev->private;
@@@ -3158,7 -3218,7 +3220,7 @@@ static sector_t sync_request(struct mdd
                                else {
                                        bad_sectors -= (sector - first_bad);
                                        if (max_sync > bad_sectors)
-                                               max_sync = max_sync;
+                                               max_sync = bad_sectors;
                                        continue;
                                }
                        }
@@@ -3482,6 -3542,7 +3544,7 @@@ static int run(struct mddev *mddev
        sector_t size;
        sector_t min_offset_diff = 0;
        int first = 1;
+       bool discard_supported = false;
  
        if (mddev->private == NULL) {
                conf = setup_conf(mddev);
  
        chunk_size = mddev->chunk_sectors << 9;
        if (mddev->queue) {
+               blk_queue_max_discard_sectors(mddev->queue,
+                                             mddev->chunk_sectors);
                blk_queue_io_min(mddev->queue, chunk_size);
                if (conf->geo.raid_disks % conf->geo.near_copies)
                        blk_queue_io_opt(mddev->queue, chunk_size * conf->geo.raid_disks);
                                          rdev->data_offset << 9);
  
                disk->head_position = 0;
+               if (blk_queue_discard(bdev_get_queue(rdev->bdev)))
+                       discard_supported = true;
        }
  
+       if (discard_supported)
+               queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
+       else
+               queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
        /* need to check that every block has at least one working mirror */
        if (!enough(conf, -1)) {
                printk(KERN_ERR "md/raid10:%s: not enough operational mirrors.\n",
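
(Note the policy difference between the two personalities: RAID10 above
sets QUEUE_FLAG_DISCARD if any member device supports discard, while
the raid5.c changes below require every member to support discard and
report discard_zeroes_data before enabling it, because a partially
discarded stripe would leave parity inconsistent.)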
diff --combined drivers/md/raid5.c
@@@ -393,8 -393,6 +393,8 @@@ static int calc_degraded(struct r5conf 
        degraded = 0;
        for (i = 0; i < conf->previous_raid_disks; i++) {
                struct md_rdev *rdev = rcu_dereference(conf->disks[i].rdev);
 +              if (rdev && test_bit(Faulty, &rdev->flags))
 +                      rdev = rcu_dereference(conf->disks[i].replacement);
                if (!rdev || test_bit(Faulty, &rdev->flags))
                        degraded++;
                else if (test_bit(In_sync, &rdev->flags))
        degraded2 = 0;
        for (i = 0; i < conf->raid_disks; i++) {
                struct md_rdev *rdev = rcu_dereference(conf->disks[i].rdev);
 +              if (rdev && test_bit(Faulty, &rdev->flags))
 +                      rdev = rcu_dereference(conf->disks[i].replacement);
                if (!rdev || test_bit(Faulty, &rdev->flags))
                        degraded2++;
                else if (test_bit(In_sync, &rdev->flags))
@@@ -551,6 -547,8 +551,8 @@@ static void ops_run_io(struct stripe_he
                                rw = WRITE_FUA;
                        else
                                rw = WRITE;
+                       if (test_bit(R5_Discard, &sh->dev[i].flags))
+                               rw |= REQ_DISCARD;
                } else if (test_and_clear_bit(R5_Wantread, &sh->dev[i].flags))
                        rw = READ;
                else if (test_and_clear_bit(R5_WantReplace,
@@@ -1174,8 -1172,11 +1176,11 @@@ ops_run_biodrain(struct stripe_head *sh
                                        set_bit(R5_WantFUA, &dev->flags);
                                if (wbi->bi_rw & REQ_SYNC)
                                        set_bit(R5_SyncIO, &dev->flags);
-                               tx = async_copy_data(1, wbi, dev->page,
-                                       dev->sector, tx);
+                               if (wbi->bi_rw & REQ_DISCARD)
+                                       set_bit(R5_Discard, &dev->flags);
+                               else
+                                       tx = async_copy_data(1, wbi, dev->page,
+                                               dev->sector, tx);
                                wbi = r5_next_bio(wbi, dev->sector);
                        }
                }
@@@ -1191,7 -1192,7 +1196,7 @@@ static void ops_complete_reconstruct(vo
        int pd_idx = sh->pd_idx;
        int qd_idx = sh->qd_idx;
        int i;
-       bool fua = false, sync = false;
+       bool fua = false, sync = false, discard = false;
  
        pr_debug("%s: stripe %llu\n", __func__,
                (unsigned long long)sh->sector);
        for (i = disks; i--; ) {
                fua |= test_bit(R5_WantFUA, &sh->dev[i].flags);
                sync |= test_bit(R5_SyncIO, &sh->dev[i].flags);
+               discard |= test_bit(R5_Discard, &sh->dev[i].flags);
        }
  
        for (i = disks; i--; ) {
                struct r5dev *dev = &sh->dev[i];
  
                if (dev->written || i == pd_idx || i == qd_idx) {
-                       set_bit(R5_UPTODATE, &dev->flags);
+                       if (!discard)
+                               set_bit(R5_UPTODATE, &dev->flags);
                        if (fua)
                                set_bit(R5_WantFUA, &dev->flags);
                        if (sync)
@@@ -1241,6 -1244,18 +1248,18 @@@ ops_run_reconstruct5(struct stripe_hea
        pr_debug("%s: stripe %llu\n", __func__,
                (unsigned long long)sh->sector);
  
+       for (i = 0; i < sh->disks; i++) {
+               if (pd_idx == i)
+                       continue;
+               if (!test_bit(R5_Discard, &sh->dev[i].flags))
+                       break;
+       }
+       if (i >= sh->disks) {
+               atomic_inc(&sh->count);
+               set_bit(R5_Discard, &sh->dev[pd_idx].flags);
+               ops_complete_reconstruct(sh);
+               return;
+       }
        /* check if prexor is active which means only process blocks
         * that are part of a read-modify-write (written)
         */
@@@ -1285,10 -1300,24 +1304,24 @@@ ops_run_reconstruct6(struct stripe_hea
  {
        struct async_submit_ctl submit;
        struct page **blocks = percpu->scribble;
-       int count;
+       int count, i;
  
        pr_debug("%s: stripe %llu\n", __func__, (unsigned long long)sh->sector);
  
+       for (i = 0; i < sh->disks; i++) {
+               if (sh->pd_idx == i || sh->qd_idx == i)
+                       continue;
+               if (!test_bit(R5_Discard, &sh->dev[i].flags))
+                       break;
+       }
+       if (i >= sh->disks) {
+               atomic_inc(&sh->count);
+               set_bit(R5_Discard, &sh->dev[sh->pd_idx].flags);
+               set_bit(R5_Discard, &sh->dev[sh->qd_idx].flags);
+               ops_complete_reconstruct(sh);
+               return;
+       }
        count = set_syndrome_sources(blocks, sh);
  
        atomic_inc(&sh->count);
@@@ -1591,7 -1620,6 +1624,7 @@@ static int resize_stripes(struct r5con
                #ifdef CONFIG_MULTICORE_RAID456
                init_waitqueue_head(&nsh->ops.wait_for_ops);
                #endif
 +              spin_lock_init(&nsh->stripe_lock);
  
                list_add(&nsh->lru, &newstripes);
        }
@@@ -2408,11 -2436,11 +2441,11 @@@ static int add_stripe_bio(struct stripe
                if (sector >= sh->dev[dd_idx].sector + STRIPE_SECTORS)
                        set_bit(R5_OVERWRITE, &sh->dev[dd_idx].flags);
        }
-       spin_unlock_irq(&sh->stripe_lock);
  
        pr_debug("added bi b#%llu to stripe s#%llu, disk %d.\n",
                (unsigned long long)(*bip)->bi_sector,
                (unsigned long long)sh->sector, dd_idx);
+       spin_unlock_irq(&sh->stripe_lock);
  
        if (conf->mddev->bitmap && firstwrite) {
                bitmap_startwrite(conf->mddev->bitmap, sh->sector,
@@@ -2479,10 -2507,8 +2512,8 @@@ handle_failed_stripe(struct r5conf *con
                bi = sh->dev[i].towrite;
                sh->dev[i].towrite = NULL;
                spin_unlock_irq(&sh->stripe_lock);
-               if (bi) {
-                       s->to_write--;
+               if (bi)
                        bitmap_end = 1;
-               }
  
                if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
                        wake_up(&conf->wait_for_overlap);
                if (!test_bit(R5_Wantfill, &sh->dev[i].flags) &&
                    (!test_bit(R5_Insync, &sh->dev[i].flags) ||
                      test_bit(R5_ReadError, &sh->dev[i].flags))) {
+                       spin_lock_irq(&sh->stripe_lock);
                        bi = sh->dev[i].toread;
                        sh->dev[i].toread = NULL;
+                       spin_unlock_irq(&sh->stripe_lock);
                        if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
                                wake_up(&conf->wait_for_overlap);
-                       if (bi) s->to_read--;
                        while (bi && bi->bi_sector <
                               sh->dev[i].sector + STRIPE_SECTORS) {
                                struct bio *nextbi =
@@@ -2741,7 -2768,8 +2773,8 @@@ static void handle_stripe_clean_event(s
                if (sh->dev[i].written) {
                        dev = &sh->dev[i];
                        if (!test_bit(R5_LOCKED, &dev->flags) &&
-                               test_bit(R5_UPTODATE, &dev->flags)) {
+                           (test_bit(R5_UPTODATE, &dev->flags) ||
+                            test_and_clear_bit(R5_Discard, &dev->flags))) {
                                /* We can return any write requests */
                                struct bio *wbi, *wbi2;
                                pr_debug("Return write for disc %d\n", i);
@@@ -2775,12 -2803,25 +2808,25 @@@ static void handle_stripe_dirtying(stru
                                   int disks)
  {
        int rmw = 0, rcw = 0, i;
-       if (conf->max_degraded == 2) {
-               /* RAID6 requires 'rcw' in current implementation
-                * Calculate the real rcw later - for now fake it
+       sector_t recovery_cp = conf->mddev->recovery_cp;
+       /* RAID6 requires 'rcw' in current implementation.
+        * Otherwise, check whether resync is now happening or should start.
+        * If yes, then the array is dirty (after unclean shutdown or
+        * initial creation), so parity in some stripes might be inconsistent.
+        * In this case, we need to always do reconstruct-write, to ensure
+        * that in case of drive failure or read-error correction, we
+        * generate correct data from the parity.
+        */
+       if (conf->max_degraded == 2 ||
+           (recovery_cp < MaxSector && sh->sector >= recovery_cp)) {
+               /* Calculate the real rcw later - for now make it
                 * look like rcw is cheaper
                 */
                rcw = 1; rmw = 2;
+               pr_debug("force RCW max_degraded=%u, recovery_cp=%llu sh->sector=%llu\n",
+                        conf->max_degraded, (unsigned long long)recovery_cp,
+                        (unsigned long long)sh->sector);
        } else for (i = disks; i--; ) {
                /* would I have to read this buffer for read_modify_write */
                struct r5dev *dev = &sh->dev[i];
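
A worked cost comparison behind the comment above (illustrative
numbers, not from the patch: a 5-disk RAID5, one data block updated):

    rmw: read old data block + old parity      -> 2 reads
    rcw: read the 3 untouched data blocks      -> 3 reads

rmw is usually cheaper, but it derives new parity from old parity
(new_p = old_p ^ old_d ^ new_d), so on a dirty array it silently
preserves any existing parity corruption; rcw recomputes parity purely
from data, which is why it is forced for sectors resync has not yet
reached (sh->sector >= recovery_cp).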
@@@ -2932,7 -2973,7 +2978,7 @@@ static void handle_parity_checks5(struc
                         */
                        set_bit(STRIPE_INSYNC, &sh->state);
                else {
-                       conf->mddev->resync_mismatches += STRIPE_SECTORS;
+                       atomic64_add(STRIPE_SECTORS, &conf->mddev->resync_mismatches);
                        if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery))
                                /* don't try to repair!! */
                                set_bit(STRIPE_INSYNC, &sh->state);
@@@ -3084,7 -3125,7 +3130,7 @@@ static void handle_parity_checks6(struc
                                 */
                        }
                } else {
-                       conf->mddev->resync_mismatches += STRIPE_SECTORS;
+                       atomic64_add(STRIPE_SECTORS, &conf->mddev->resync_mismatches);
                        if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery))
                                /* don't try to repair!! */
                                set_bit(STRIPE_INSYNC, &sh->state);
@@@ -3459,10 -3500,12 +3505,12 @@@ static void handle_stripe(struct stripe
        if (s.written &&
            (s.p_failed || ((test_bit(R5_Insync, &pdev->flags)
                             && !test_bit(R5_LOCKED, &pdev->flags)
-                            && test_bit(R5_UPTODATE, &pdev->flags)))) &&
+                            && (test_bit(R5_UPTODATE, &pdev->flags) ||
+                                test_bit(R5_Discard, &pdev->flags))))) &&
            (s.q_failed || ((test_bit(R5_Insync, &qdev->flags)
                             && !test_bit(R5_LOCKED, &qdev->flags)
-                            && test_bit(R5_UPTODATE, &qdev->flags)))))
+                            && (test_bit(R5_UPTODATE, &qdev->flags) ||
+                                test_bit(R5_Discard, &qdev->flags))))))
                handle_stripe_clean_event(conf, sh, disks, &s.return_bi);
  
        /* Now we might consider reading some blocks, either to check/generate
                /* All the 'written' buffers and the parity block are ready to
                 * be written back to disk
                 */
-               BUG_ON(!test_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags));
+               BUG_ON(!test_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags) &&
+                      !test_bit(R5_Discard, &sh->dev[sh->pd_idx].flags));
                BUG_ON(sh->qd_idx >= 0 &&
-                      !test_bit(R5_UPTODATE, &sh->dev[sh->qd_idx].flags));
+                      !test_bit(R5_UPTODATE, &sh->dev[sh->qd_idx].flags) &&
+                      !test_bit(R5_Discard, &sh->dev[sh->qd_idx].flags));
                for (i = disks; i--; ) {
                        struct r5dev *dev = &sh->dev[i];
                        if (test_bit(R5_LOCKED, &dev->flags) &&
@@@ -4072,6 -4117,88 +4122,88 @@@ static void release_stripe_plug(struct 
                release_stripe(sh);
  }
  
+ static void make_discard_request(struct mddev *mddev, struct bio *bi)
+ {
+       struct r5conf *conf = mddev->private;
+       sector_t logical_sector, last_sector;
+       struct stripe_head *sh;
+       int remaining;
+       int stripe_sectors;
+       if (mddev->reshape_position != MaxSector)
+               /* Skip discard while reshape is happening */
+               return;
+       logical_sector = bi->bi_sector & ~((sector_t)STRIPE_SECTORS-1);
+       last_sector = bi->bi_sector + (bi->bi_size>>9);
+       bi->bi_next = NULL;
+       bi->bi_phys_segments = 1; /* over-loaded to count active stripes */
+       stripe_sectors = conf->chunk_sectors *
+               (conf->raid_disks - conf->max_degraded);
+       logical_sector = DIV_ROUND_UP_SECTOR_T(logical_sector,
+                                              stripe_sectors);
+       sector_div(last_sector, stripe_sectors);
+       logical_sector *= conf->chunk_sectors;
+       last_sector *= conf->chunk_sectors;
+       for (; logical_sector < last_sector;
+            logical_sector += STRIPE_SECTORS) {
+               DEFINE_WAIT(w);
+               int d;
+       again:
+               sh = get_active_stripe(conf, logical_sector, 0, 0, 0);
+               prepare_to_wait(&conf->wait_for_overlap, &w,
+                               TASK_UNINTERRUPTIBLE);
+               spin_lock_irq(&sh->stripe_lock);
+               for (d = 0; d < conf->raid_disks; d++) {
+                       if (d == sh->pd_idx || d == sh->qd_idx)
+                               continue;
+                       if (sh->dev[d].towrite || sh->dev[d].toread) {
+                               set_bit(R5_Overlap, &sh->dev[d].flags);
+                               spin_unlock_irq(&sh->stripe_lock);
+                               release_stripe(sh);
+                               schedule();
+                               goto again;
+                       }
+               }
+               finish_wait(&conf->wait_for_overlap, &w);
+               for (d = 0; d < conf->raid_disks; d++) {
+                       if (d == sh->pd_idx || d == sh->qd_idx)
+                               continue;
+                       sh->dev[d].towrite = bi;
+                       set_bit(R5_OVERWRITE, &sh->dev[d].flags);
+                       raid5_inc_bi_active_stripes(bi);
+               }
+               spin_unlock_irq(&sh->stripe_lock);
+               if (conf->mddev->bitmap) {
+                       for (d = 0;
+                            d < conf->raid_disks - conf->max_degraded;
+                            d++)
+                               bitmap_startwrite(mddev->bitmap,
+                                                 sh->sector,
+                                                 STRIPE_SECTORS,
+                                                 0);
+                       sh->bm_seq = conf->seq_flush + 1;
+                       set_bit(STRIPE_BIT_DELAY, &sh->state);
+               }
+               set_bit(STRIPE_HANDLE, &sh->state);
+               clear_bit(STRIPE_DELAYED, &sh->state);
+               if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
+                       atomic_inc(&conf->preread_active_stripes);
+               release_stripe_plug(mddev, sh);
+       }
+       remaining = raid5_dec_bi_active_stripes(bi);
+       if (remaining == 0) {
+               md_write_end(mddev);
+               bio_endio(bi, 0);
+       }
+ }
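
To make the head/tail rounding above concrete (illustrative numbers,
not from the patch):

    chunk_sectors = 128, raid_disks = 6, max_degraded = 1
    stripe_sectors = 128 * (6 - 1) = 640

    discard of array sectors [700, 2000):
      head rounds up:    DIV_ROUND_UP(700, 640) = 2
      tail rounds down:  2000 / 640             = 3

Only the one full stripe covering array sectors [1280, 1920) is
discarded; the partial stripes at both ends are skipped, which is also
why run() below sets discard_zeroes_data = 0.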
  static void make_request(struct mddev *mddev, struct bio * bi)
  {
        struct r5conf *conf = mddev->private;
             chunk_aligned_read(mddev,bi))
                return;
  
+       if (unlikely(bi->bi_rw & REQ_DISCARD)) {
+               make_discard_request(mddev, bi);
+               return;
+       }
        logical_sector = bi->bi_sector & ~((sector_t)STRIPE_SECTORS-1);
        last_sector = bi->bi_sector + (bi->bi_size>>9);
        bi->bi_next = NULL;
                        finish_wait(&conf->wait_for_overlap, &w);
                        set_bit(STRIPE_HANDLE, &sh->state);
                        clear_bit(STRIPE_DELAYED, &sh->state);
 -                      if ((bi->bi_rw & REQ_NOIDLE) &&
 +                      if ((bi->bi_rw & REQ_SYNC) &&
                            !test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
                                atomic_inc(&conf->preread_active_stripes);
                        release_stripe_plug(mddev, sh);
@@@ -4630,8 -4762,9 +4767,9 @@@ static int handle_active_stripes(struc
   * During the scan, completed stripes are saved for us by the interrupt
   * handler, so that they will not have to wait for our next wakeup.
   */
- static void raid5d(struct mddev *mddev)
+ static void raid5d(struct md_thread *thread)
  {
+       struct mddev *mddev = thread->mddev;
        struct r5conf *conf = mddev->private;
        int handled;
        struct blk_plug plug;
@@@ -5366,6 -5499,7 +5504,7 @@@ static int run(struct mddev *mddev
  
        if (mddev->queue) {
                int chunk_size;
+               bool discard_supported = true;
                /* read-ahead size must cover two whole stripes, which
                 * is 2 * (datadisks = n - max_degraded) * chunksize,
                 * where 'n' is the number of raid devices
                blk_queue_io_min(mddev->queue, chunk_size);
                blk_queue_io_opt(mddev->queue, chunk_size *
                                 (conf->raid_disks - conf->max_degraded));
+               /*
+                * We can only discard a whole stripe. It doesn't make sense to
+                * discard the data disks but still write the parity disk
+                */
+               stripe = stripe * PAGE_SIZE;
+               mddev->queue->limits.discard_alignment = stripe;
+               mddev->queue->limits.discard_granularity = stripe;
+               /*
+                * the unaligned part of a discard request will be ignored, so we
+                * can't guarantee discard_zeroes_data
+                */
+               mddev->queue->limits.discard_zeroes_data = 0;
  
                rdev_for_each(rdev, mddev) {
                        disk_stack_limits(mddev->gendisk, rdev->bdev,
                                          rdev->data_offset << 9);
                        disk_stack_limits(mddev->gendisk, rdev->bdev,
                                          rdev->new_data_offset << 9);
+                       /*
+                        * discard_zeroes_data is required, otherwise data
+                        * could be lost. Consider a scenario: discard a stripe
+                        * (the stripe could be inconsistent if
+                        * discard_zeroes_data is 0); write one disk of the
+                        * stripe (the stripe could be inconsistent again
+                        * depending on which disks are used to calculate
+                        * parity); the disk is broken; The stripe data of this
+                        * disk is lost.
+                        */
+                       if (!blk_queue_discard(bdev_get_queue(rdev->bdev)) ||
+                           !bdev_get_queue(rdev->bdev)->
+                                               limits.discard_zeroes_data)
+                               discard_supported = false;
                }
+               if (discard_supported &&
+                  mddev->queue->limits.max_discard_sectors >= stripe &&
+                  mddev->queue->limits.discard_granularity >= stripe)
+                       queue_flag_set_unlocked(QUEUE_FLAG_DISCARD,
+                                               mddev->queue);
+               else
+                       queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD,
+                                               mddev->queue);
        }
  
        return 0;
@@@ -5702,7 -5871,8 +5876,8 @@@ static int check_reshape(struct mddev *
        if (!check_stripe_cache(mddev))
                return -ENOSPC;
  
-       return resize_stripes(conf, conf->raid_disks + mddev->delta_disks);
+       return resize_stripes(conf, (conf->previous_raid_disks
+                                    + mddev->delta_disks));
  }
  
  static int raid5_start_reshape(struct mddev *mddev)