Merge tag 'md/4.17-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/shli/md
author Linus Torvalds <torvalds@linux-foundation.org>
Fri, 20 Apr 2018 17:39:44 +0000 (10:39 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Fri, 20 Apr 2018 17:39:44 +0000 (10:39 -0700)
Pull MD fixes from Shaohua Li:
 "Three small fixes for MD:

   - md-cluster fix for faulty device from Guoqing

   - writehint fix for writebehind IO for raid1 from Mariusz

   - a livelock fix for interrupted recovery from Yufen"

* tag 'md/4.17-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/shli/md:
  raid1: copy write hint from master bio to behind bio
  md/raid1: exit sync request if MD_RECOVERY_INTR is set
  md-cluster: don't update recovery_offset for faulty device
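For the MD_RECOVERY_INTR fix, here is a rough, hedged sketch of the intended
control flow (not the literal patched code; the function name
raid1_sync_request_sketch is invented for illustration, while raise_barrier(),
md_do_sync(), MD_RECOVERY_INTR and RESYNC_SECTORS are the existing mainline
names):

    /*
     * Hedged sketch: how the sync path is expected to bail out once
     * recovery is interrupted, instead of sleeping in raise_barrier().
     */
    static sector_t raid1_sync_request_sketch(struct mddev *mddev,
                                              sector_t sector_nr, int *skipped)
    {
            struct r1conf *conf = mddev->private;

            /* raise_barrier() now returns non-zero (-EINTR) if
             * MD_RECOVERY_INTR was set while it waited. */
            if (raise_barrier(conf, sector_nr))
                    return 0;

            /* ... allocate the r1_bio and issue resync I/O as before ... */

            return RESYNC_SECTORS;
    }

Returning 0 here lets md_do_sync(), which re-checks MD_RECOVERY_INTR on each
iteration, wind the recovery down cleanly instead of livelocking against
frozen or pending regular I/O.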

drivers/md/md.c
drivers/md/raid1.c

diff --git a/drivers/md/md.c b/drivers/md/md.c
index 3bea45e..c208c01 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -9256,8 +9256,10 @@ void md_reload_sb(struct mddev *mddev, int nr)
        check_sb_changes(mddev, rdev);
 
        /* Read all rdev's to update recovery_offset */
-       rdev_for_each_rcu(rdev, mddev)
-               read_rdev(mddev, rdev);
+       rdev_for_each_rcu(rdev, mddev) {
+               if (!test_bit(Faulty, &rdev->flags))
+                       read_rdev(mddev, rdev);
+       }
 }
 EXPORT_SYMBOL(md_reload_sb);
 
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index e2943fb..e9e3308 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -854,7 +854,7 @@ static void flush_pending_writes(struct r1conf *conf)
  *    there is no normal IO happening.  It must arrange to call
  *    lower_barrier when the particular background IO completes.
  */
-static void raise_barrier(struct r1conf *conf, sector_t sector_nr)
+static sector_t raise_barrier(struct r1conf *conf, sector_t sector_nr)
 {
        int idx = sector_to_idx(sector_nr);
 
@@ -885,13 +885,23 @@ static void raise_barrier(struct r1conf *conf, sector_t sector_nr)
         *    max resync count allowed on the current I/O barrier bucket.
         */
        wait_event_lock_irq(conf->wait_barrier,
-                           !conf->array_frozen &&
+                           (!conf->array_frozen &&
                             !atomic_read(&conf->nr_pending[idx]) &&
-                            atomic_read(&conf->barrier[idx]) < RESYNC_DEPTH,
+                            atomic_read(&conf->barrier[idx]) < RESYNC_DEPTH) ||
+                               test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery),
                            conf->resync_lock);
 
+       if (test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery)) {
+               atomic_dec(&conf->barrier[idx]);
+               spin_unlock_irq(&conf->resync_lock);
+               wake_up(&conf->wait_barrier);
+               return -EINTR;
+       }
+
        atomic_inc(&conf->nr_sync_pending);
        spin_unlock_irq(&conf->resync_lock);
+
+       return 0;
 }
 
 static void lower_barrier(struct r1conf *conf, sector_t sector_nr)
@@ -1092,6 +1102,8 @@ static void alloc_behind_master_bio(struct r1bio *r1_bio,
                goto skip_copy;
        }
 
+       behind_bio->bi_write_hint = bio->bi_write_hint;
+
        while (i < vcnt && size) {
                struct page *page;
                int len = min_t(int, PAGE_SIZE, size);
@@ -2662,9 +2674,12 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
 
        bitmap_cond_end_sync(mddev->bitmap, sector_nr,
                mddev_is_clustered(mddev) && (sector_nr + 2 * RESYNC_SECTORS > conf->cluster_sync_high));
-       r1_bio = raid1_alloc_init_r1buf(conf);
 
-       raise_barrier(conf, sector_nr);
+
+       if (raise_barrier(conf, sector_nr))
+               return 0;
+
+       r1_bio = raid1_alloc_init_r1buf(conf);
 
        rcu_read_lock();
        /*
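
Background for the write-hint change above, as a hedged userspace illustration
(not part of this pull; the helper below is invented for the example): a
per-file write-lifetime hint set via fcntl(F_SET_RW_HINT) is what block-layer
writers ultimately place in bio->bi_write_hint, and with the raid1 fix the
write-behind copy of the bio now carries the same hint as the master bio.

    /* Hedged example; F_SET_RW_HINT and the RWH_WRITE_LIFE_* values exist
     * since Linux 4.13.  Fallback defines are provided in case the libc
     * headers in use do not expose them yet. */
    #include <fcntl.h>
    #include <stdint.h>

    #ifndef F_SET_RW_HINT
    #define F_SET_RW_HINT (1024 + 12)   /* F_LINUX_SPECIFIC_BASE + 12 */
    #endif
    #ifndef RWH_WRITE_LIFE_SHORT
    #define RWH_WRITE_LIFE_SHORT 2
    #endif

    /* Tag an open file so its writes are hinted as short-lived data. */
    int set_short_lifetime_hint(int fd)
    {
            uint64_t hint = RWH_WRITE_LIFE_SHORT;

            return fcntl(fd, F_SET_RW_HINT, &hint);
    }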