zram: refactor highlevel read and write handling
author     Christoph Hellwig <hch@lst.de>
           Tue, 11 Apr 2023 17:14:49 +0000 (19:14 +0200)
committer  Andrew Morton <akpm@linux-foundation.org>
           Tue, 18 Apr 2023 23:29:58 +0000 (16:29 -0700)
Instead of having an outer loop in __zram_make_request and then branching
out for reads vs writes on each loop iteration in zram_bvec_rw, split the
main handler into separate zram_bio_read and zram_bio_write handlers that
also include the functionality formerly in zram_bvec_rw.
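
For orientation, this is the read-side handler as it reads once the hunks
below are applied (copied from the diff, with two explanatory comments
added); zram_bio_write() follows the same per-segment loop but calls
zram_bvec_write(), bumps stats.failed_writes, and has no
flush_dcache_page():

  static void zram_bio_read(struct zram *zram, struct bio *bio)
  {
          struct bvec_iter iter;
          struct bio_vec bv;
          unsigned long start_time;

          start_time = bio_start_io_acct(bio);
          bio_for_each_segment(bv, bio, iter) {
                  /* translate the segment's sector into a page index/offset */
                  u32 index = iter.bi_sector >> SECTORS_PER_PAGE_SHIFT;
                  u32 offset = (iter.bi_sector & (SECTORS_PER_PAGE - 1)) <<
                                  SECTOR_SHIFT;

                  if (zram_bvec_read(zram, &bv, index, offset, bio) < 0) {
                          atomic64_inc(&zram->stats.failed_reads);
                          bio->bi_status = BLK_STS_IOERR;
                          break;
                  }
                  flush_dcache_page(bv.bv_page);

                  /* record the access under the per-slot lock */
                  zram_slot_lock(zram, index);
                  zram_accessed(zram, index);
                  zram_slot_unlock(zram, index);
          }
          bio_end_io_acct(bio, start_time);
          bio_endio(bio);
  }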

Link: https://lkml.kernel.org/r/20230411171459.567614-8-hch@lst.de
Signed-off-by: Christoph Hellwig <hch@lst.de>
Acked-by: Minchan Kim <minchan@kernel.org>
Reviewed-by: Sergey Senozhatsky <senozhatsky@chromium.org>
Cc: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
drivers/block/zram/zram_drv.c

index 46dc7a2..2d01544 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -1921,38 +1921,34 @@ static void zram_bio_discard(struct zram *zram, struct bio *bio)
        bio_endio(bio);
 }
 
-/*
- * Returns errno if it has some problem. Otherwise return 0 or 1.
- * Returns 0 if IO request was done synchronously
- * Returns 1 if IO request was successfully submitted.
- */
-static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
-                       int offset, enum req_op op, struct bio *bio)
+static void zram_bio_read(struct zram *zram, struct bio *bio)
 {
-       int ret;
+       struct bvec_iter iter;
+       struct bio_vec bv;
+       unsigned long start_time;
 
-       if (!op_is_write(op)) {
-               ret = zram_bvec_read(zram, bvec, index, offset, bio);
-               if (unlikely(ret < 0)) {
+       start_time = bio_start_io_acct(bio);
+       bio_for_each_segment(bv, bio, iter) {
+               u32 index = iter.bi_sector >> SECTORS_PER_PAGE_SHIFT;
+               u32 offset = (iter.bi_sector & (SECTORS_PER_PAGE - 1)) <<
+                               SECTOR_SHIFT;
+
+               if (zram_bvec_read(zram, &bv, index, offset, bio) < 0) {
                        atomic64_inc(&zram->stats.failed_reads);
-                       return ret;
-               }
-               flush_dcache_page(bvec->bv_page);
-       } else {
-               ret = zram_bvec_write(zram, bvec, index, offset, bio);
-               if (unlikely(ret < 0)) {
-                       atomic64_inc(&zram->stats.failed_writes);
-                       return ret;
+                       bio->bi_status = BLK_STS_IOERR;
+                       break;
                }
-       }
+               flush_dcache_page(bv.bv_page);
 
-       zram_slot_lock(zram, index);
-       zram_accessed(zram, index);
-       zram_slot_unlock(zram, index);
-       return 0;
+               zram_slot_lock(zram, index);
+               zram_accessed(zram, index);
+               zram_slot_unlock(zram, index);
+       }
+       bio_end_io_acct(bio, start_time);
+       bio_endio(bio);
 }
 
-static void __zram_make_request(struct zram *zram, struct bio *bio)
+static void zram_bio_write(struct zram *zram, struct bio *bio)
 {
        struct bvec_iter iter;
        struct bio_vec bv;
@@ -1964,11 +1960,15 @@ static void __zram_make_request(struct zram *zram, struct bio *bio)
                u32 offset = (iter.bi_sector & (SECTORS_PER_PAGE - 1)) <<
                                SECTOR_SHIFT;
 
-               if (zram_bvec_rw(zram, &bv, index, offset, bio_op(bio),
-                               bio) < 0) {
+               if (zram_bvec_write(zram, &bv, index, offset, bio) < 0) {
+                       atomic64_inc(&zram->stats.failed_writes);
                        bio->bi_status = BLK_STS_IOERR;
                        break;
                }
+
+               zram_slot_lock(zram, index);
+               zram_accessed(zram, index);
+               zram_slot_unlock(zram, index);
        }
        bio_end_io_acct(bio, start_time);
        bio_endio(bio);
@@ -1983,8 +1983,10 @@ static void zram_submit_bio(struct bio *bio)
 
        switch (bio_op(bio)) {
        case REQ_OP_READ:
+               zram_bio_read(zram, bio);
+               break;
        case REQ_OP_WRITE:
-               __zram_make_request(zram, bio);
+               zram_bio_write(zram, bio);
                break;
        case REQ_OP_DISCARD:
        case REQ_OP_WRITE_ZEROES: