Merge branch 'md-next' of https://git.kernel.org/pub/scm/linux/kernel/git/song/md...
authorJens Axboe <axboe@kernel.dk>
Thu, 10 Mar 2022 23:04:03 +0000 (16:04 -0700)
committerJens Axboe <axboe@kernel.dk>
Thu, 10 Mar 2022 23:04:03 +0000 (16:04 -0700)
Pull MD updates from Song:

"This set contains bio handling cleanups for raid5."

* 'md-next' of https://git.kernel.org/pub/scm/linux/kernel/git/song/md:
  raid5: initialize the stripe_head embedded bios as needed
  raid5-cache: statically allocate the recovery ra bio
  raid5-cache: fully initialize flush_bio when needed
  raid5-ppl: fully initialize the bio in ppl_new_iounit

drivers/md/raid5-cache.c
drivers/md/raid5-ppl.c
drivers/md/raid5.c

index 86e2bb8..a7d50ff 100644 (file)
@@ -1266,6 +1266,8 @@ static void r5l_log_flush_endio(struct bio *bio)
                r5l_io_run_stripes(io);
        list_splice_tail_init(&log->flushing_ios, &log->finished_ios);
        spin_unlock_irqrestore(&log->io_list_lock, flags);
+
+       bio_uninit(bio);
 }
 
 /*
@@ -1301,7 +1303,7 @@ void r5l_flush_stripe_to_raid(struct r5l_log *log)
 
        if (!do_flush)
                return;
-       bio_reset(&log->flush_bio, log->rdev->bdev,
+       bio_init(&log->flush_bio, log->rdev->bdev, NULL, 0,
                  REQ_OP_WRITE | REQ_PREFLUSH);
        log->flush_bio.bi_end_io = r5l_log_flush_endio;
        submit_bio(&log->flush_bio);
@@ -1621,10 +1623,10 @@ struct r5l_recovery_ctx {
         * just copy data from the pool.
         */
        struct page *ra_pool[R5L_RECOVERY_PAGE_POOL_SIZE];
+       struct bio_vec ra_bvec[R5L_RECOVERY_PAGE_POOL_SIZE];
        sector_t pool_offset;   /* offset of first page in the pool */
        int total_pages;        /* total allocated pages */
        int valid_pages;        /* pages with valid data */
-       struct bio *ra_bio;     /* bio to do the read ahead */
 };
 
 static int r5l_recovery_allocate_ra_pool(struct r5l_log *log,
@@ -1632,11 +1634,6 @@ static int r5l_recovery_allocate_ra_pool(struct r5l_log *log,
 {
        struct page *page;
 
-       ctx->ra_bio = bio_alloc_bioset(NULL, BIO_MAX_VECS, 0, GFP_KERNEL,
-                                      &log->bs);
-       if (!ctx->ra_bio)
-               return -ENOMEM;
-
        ctx->valid_pages = 0;
        ctx->total_pages = 0;
        while (ctx->total_pages < R5L_RECOVERY_PAGE_POOL_SIZE) {
@@ -1648,10 +1645,8 @@ static int r5l_recovery_allocate_ra_pool(struct r5l_log *log,
                ctx->total_pages += 1;
        }
 
-       if (ctx->total_pages == 0) {
-               bio_put(ctx->ra_bio);
+       if (ctx->total_pages == 0)
                return -ENOMEM;
-       }
 
        ctx->pool_offset = 0;
        return 0;
@@ -1664,7 +1659,6 @@ static void r5l_recovery_free_ra_pool(struct r5l_log *log,
 
        for (i = 0; i < ctx->total_pages; ++i)
                put_page(ctx->ra_pool[i]);
-       bio_put(ctx->ra_bio);
 }
 
 /*
@@ -1677,15 +1671,19 @@ static int r5l_recovery_fetch_ra_pool(struct r5l_log *log,
                                      struct r5l_recovery_ctx *ctx,
                                      sector_t offset)
 {
-       bio_reset(ctx->ra_bio, log->rdev->bdev, REQ_OP_READ);
-       ctx->ra_bio->bi_iter.bi_sector = log->rdev->data_offset + offset;
+       struct bio bio;
+       int ret;
+
+       bio_init(&bio, log->rdev->bdev, ctx->ra_bvec,
+                R5L_RECOVERY_PAGE_POOL_SIZE, REQ_OP_READ);
+       bio.bi_iter.bi_sector = log->rdev->data_offset + offset;
 
        ctx->valid_pages = 0;
        ctx->pool_offset = offset;
 
        while (ctx->valid_pages < ctx->total_pages) {
-               bio_add_page(ctx->ra_bio,
-                            ctx->ra_pool[ctx->valid_pages], PAGE_SIZE, 0);
+               __bio_add_page(&bio, ctx->ra_pool[ctx->valid_pages], PAGE_SIZE,
+                              0);
                ctx->valid_pages += 1;
 
                offset = r5l_ring_add(log, offset, BLOCK_SECTORS);
@@ -1694,7 +1692,9 @@ static int r5l_recovery_fetch_ra_pool(struct r5l_log *log,
                        break;
        }
 
-       return submit_bio_wait(ctx->ra_bio);
+       ret = submit_bio_wait(&bio);
+       bio_uninit(&bio);
+       return ret;
 }
 
 /*
@@ -3105,7 +3105,6 @@ int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev)
        INIT_LIST_HEAD(&log->io_end_ios);
        INIT_LIST_HEAD(&log->flushing_ios);
        INIT_LIST_HEAD(&log->finished_ios);
-       bio_init(&log->flush_bio, NULL, NULL, 0, 0);
 
        log->io_kc = KMEM_CACHE(r5l_io_unit, 0);
        if (!log->io_kc)
index 3446797..f7fdd82 100644 (file)
@@ -250,7 +250,8 @@ static struct ppl_io_unit *ppl_new_iounit(struct ppl_log *log,
        INIT_LIST_HEAD(&io->stripe_list);
        atomic_set(&io->pending_stripes, 0);
        atomic_set(&io->pending_flushes, 0);
-       bio_init(&io->bio, NULL, io->biovec, PPL_IO_INLINE_BVECS, 0);
+       bio_init(&io->bio, log->rdev->bdev, io->biovec, PPL_IO_INLINE_BVECS,
+                REQ_OP_WRITE | REQ_FUA);
 
        pplhdr = page_address(io->header_page);
        clear_page(pplhdr);
@@ -465,8 +466,6 @@ static void ppl_submit_iounit(struct ppl_io_unit *io)
 
 
        bio->bi_end_io = ppl_log_endio;
-       bio->bi_opf = REQ_OP_WRITE | REQ_FUA;
-       bio_set_dev(bio, log->rdev->bdev);
        bio->bi_iter.bi_sector = log->next_io_sector;
        bio_add_page(bio, io->header_page, PAGE_SIZE, 0);
        bio->bi_write_hint = ppl_conf->write_hint;
index 8891aab..8bd5f06 100644 (file)
@@ -1060,6 +1060,7 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
        int i, disks = sh->disks;
        struct stripe_head *head_sh = sh;
        struct bio_list pending_bios = BIO_EMPTY_LIST;
+       struct r5dev *dev;
        bool should_defer;
 
        might_sleep();
@@ -1094,8 +1095,9 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
                        op_flags |= REQ_SYNC;
 
 again:
-               bi = &sh->dev[i].req;
-               rbi = &sh->dev[i].rreq; /* For writing to replacement */
+               dev = &sh->dev[i];
+               bi = &dev->req;
+               rbi = &dev->rreq; /* For writing to replacement */
 
                rcu_read_lock();
                rrdev = rcu_dereference(conf->disks[i].replacement);
@@ -1171,8 +1173,7 @@ again:
 
                        set_bit(STRIPE_IO_STARTED, &sh->state);
 
-                       bio_set_dev(bi, rdev->bdev);
-                       bio_set_op_attrs(bi, op, op_flags);
+                       bio_init(bi, rdev->bdev, &dev->vec, 1, op | op_flags);
                        bi->bi_end_io = op_is_write(op)
                                ? raid5_end_write_request
                                : raid5_end_read_request;
@@ -1238,8 +1239,7 @@ again:
 
                        set_bit(STRIPE_IO_STARTED, &sh->state);
 
-                       bio_set_dev(rbi, rrdev->bdev);
-                       bio_set_op_attrs(rbi, op, op_flags);
+                       bio_init(rbi, rrdev->bdev, &dev->rvec, 1, op | op_flags);
                        BUG_ON(!op_is_write(op));
                        rbi->bi_end_io = raid5_end_write_request;
                        rbi->bi_private = sh;
@@ -2294,7 +2294,6 @@ static struct stripe_head *alloc_stripe(struct kmem_cache *sc, gfp_t gfp,
        int disks, struct r5conf *conf)
 {
        struct stripe_head *sh;
-       int i;
 
        sh = kmem_cache_zalloc(sc, gfp);
        if (sh) {
@@ -2307,12 +2306,6 @@ static struct stripe_head *alloc_stripe(struct kmem_cache *sc, gfp_t gfp,
                atomic_set(&sh->count, 1);
                sh->raid_conf = conf;
                sh->log_start = MaxSector;
-               for (i = 0; i < disks; i++) {
-                       struct r5dev *dev = &sh->dev[i];
-
-                       bio_init(&dev->req, NULL, &dev->vec, 1, 0);
-                       bio_init(&dev->rreq, NULL, &dev->rvec, 1, 0);
-               }
 
                if (raid5_has_ppl(conf)) {
                        sh->ppl_page = alloc_page(gfp);
@@ -2677,7 +2670,6 @@ static void raid5_end_read_request(struct bio * bi)
                (unsigned long long)sh->sector, i, atomic_read(&sh->count),
                bi->bi_status);
        if (i == disks) {
-               bio_reset(bi, NULL, 0);
                BUG();
                return;
        }
@@ -2785,7 +2777,7 @@ static void raid5_end_read_request(struct bio * bi)
                }
        }
        rdev_dec_pending(rdev, conf->mddev);
-       bio_reset(bi, NULL, 0);
+       bio_uninit(bi);
        clear_bit(R5_LOCKED, &sh->dev[i].flags);
        set_bit(STRIPE_HANDLE, &sh->state);
        raid5_release_stripe(sh);
@@ -2823,7 +2815,6 @@ static void raid5_end_write_request(struct bio *bi)
                (unsigned long long)sh->sector, i, atomic_read(&sh->count),
                bi->bi_status);
        if (i == disks) {
-               bio_reset(bi, NULL, 0);
                BUG();
                return;
        }
@@ -2860,7 +2851,7 @@ static void raid5_end_write_request(struct bio *bi)
        if (sh->batch_head && bi->bi_status && !replacement)
                set_bit(STRIPE_BATCH_ERR, &sh->batch_head->state);
 
-       bio_reset(bi, NULL, 0);
+       bio_uninit(bi);
        if (!test_and_clear_bit(R5_DOUBLE_LOCKED, &sh->dev[i].flags))
                clear_bit(R5_LOCKED, &sh->dev[i].flags);
        set_bit(STRIPE_HANDLE, &sh->state);