raid1: serialize the overlap write
authorGuoqing Jiang <guoqing.jiang@cloud.ionos.com>
Mon, 23 Dec 2019 09:48:58 +0000 (10:48 +0100)
committerSong Liu <songliubraving@fb.com>
Mon, 13 Jan 2020 19:44:09 +0000 (11:44 -0800)
Before dispatching a write bio, a raid1 array with serialize_policy
enabled needs to check whether the bio overlaps any previously
in-flight write bios. If an overlap exists, the write has to wait
until the collision has cleared.
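
The pattern mirrors the existing write-behind handling: take the request
range, wait on rdev->serial_io_wait until check_and_add_serial() accepts
it, and release the range with remove_serial() when the write completes.
A minimal sketch of that flow, using the same helpers as the diff below:

	sector_t lo = r1_bio->sector;
	sector_t hi = r1_bio->sector + r1_bio->sectors;

	/* submit path: block until [lo, hi) no longer overlaps an
	 * in-flight write on this rdev */
	if (mddev->serialize_policy)
		wait_event(rdev->serial_io_wait,
			   check_and_add_serial(rdev, lo, hi) == 0);

	/* ... dispatch mbio to the member device ... */

	/* completion path: drop the range and wake any writer waiting
	 * on an overlapping region */
	if (rdev->mddev->serialize_policy)
		remove_serial(rdev, lo, hi);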

Signed-off-by: Guoqing Jiang <guoqing.jiang@cloud.ionos.com>
Signed-off-by: Song Liu <songliubraving@fb.com>
drivers/md/raid1.c

index 0439f67..3ad2f5a 100644
@@ -430,6 +430,8 @@ static void raid1_end_write_request(struct bio *bio)
        int mirror = find_bio_disk(r1_bio, bio);
        struct md_rdev *rdev = conf->mirrors[mirror].rdev;
        bool discard_error;
+       sector_t lo = r1_bio->sector;
+       sector_t hi = r1_bio->sector + r1_bio->sectors;
 
        discard_error = bio->bi_status && bio_op(bio) == REQ_OP_DISCARD;
 
@@ -499,12 +501,8 @@ static void raid1_end_write_request(struct bio *bio)
        }
 
        if (behind) {
-               if (test_bit(CollisionCheck, &rdev->flags)) {
-                       sector_t lo = r1_bio->sector;
-                       sector_t hi = r1_bio->sector + r1_bio->sectors;
-
+               if (test_bit(CollisionCheck, &rdev->flags))
                        remove_serial(rdev, lo, hi);
-               }
                if (test_bit(WriteMostly, &rdev->flags))
                        atomic_dec(&r1_bio->behind_remaining);
 
@@ -527,7 +525,8 @@ static void raid1_end_write_request(struct bio *bio)
                                call_bio_endio(r1_bio);
                        }
                }
-       }
+       } else if (rdev->mddev->serialize_policy)
+               remove_serial(rdev, lo, hi);
        if (r1_bio->bios[mirror] == NULL)
                rdev_dec_pending(rdev, conf->mddev);
 
@@ -1337,6 +1336,7 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
        struct raid1_plug_cb *plug = NULL;
        int first_clone;
        int max_sectors;
+       sector_t lo, hi;
 
        if (mddev_is_clustered(mddev) &&
             md_cluster_ops->area_resyncing(mddev, WRITE,
@@ -1364,6 +1364,8 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
 
        r1_bio = alloc_r1bio(mddev, bio);
        r1_bio->sectors = max_write_sectors;
+       lo = r1_bio->sector;
+       hi = r1_bio->sector + r1_bio->sectors;
 
        if (conf->pending_count >= max_queued_requests) {
                md_wakeup_thread(mddev->thread);
@@ -1479,6 +1481,7 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
 
        for (i = 0; i < disks; i++) {
                struct bio *mbio = NULL;
+               struct md_rdev *rdev = conf->mirrors[i].rdev;
                if (!r1_bio->bios[i])
                        continue;
 
@@ -1506,19 +1509,15 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
                        mbio = bio_clone_fast(bio, GFP_NOIO, &mddev->bio_set);
 
                if (r1_bio->behind_master_bio) {
-                       struct md_rdev *rdev = conf->mirrors[i].rdev;
-
-                       if (test_bit(CollisionCheck, &rdev->flags)) {
-                               sector_t lo = r1_bio->sector;
-                               sector_t hi = r1_bio->sector + r1_bio->sectors;
-
+                       if (test_bit(CollisionCheck, &rdev->flags))
                                wait_event(rdev->serial_io_wait,
                                           check_and_add_serial(rdev, lo, hi)
                                           == 0);
-                       }
                        if (test_bit(WriteMostly, &rdev->flags))
                                atomic_inc(&r1_bio->behind_remaining);
-               }
+               } else if (mddev->serialize_policy)
+                       wait_event(rdev->serial_io_wait,
+                                  check_and_add_serial(rdev, lo, hi) == 0);
 
                r1_bio->bios[i] = mbio;