block: use bdev_is_zoned instead of open coding it
author: Christoph Hellwig <hch@lst.de>
Wed, 6 Jul 2022 07:03:37 +0000 (09:03 +0200)
committer: Jens Axboe <axboe@kernel.dk>
Wed, 6 Jul 2022 12:46:25 +0000 (06:46 -0600)
Use bdev_is_zoned in all places where a block_device is available instead
of open coding it.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Chaitanya Kulkarni <kch@nvidia.com>
Reviewed-by: Damien Le Moal <damien.lemoal@opensource.wdc.com>
Reviewed-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
Link: https://lore.kernel.org/r/20220706070350.1703384-4-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
block/bio.c
block/blk-core.c
block/blk-mq.h
block/blk-zoned.c
drivers/md/dm-table.c
drivers/md/dm-zone.c
drivers/md/dm.c

index 933ea32..888ee81 100644 (file)
@@ -1033,7 +1033,7 @@ int bio_add_zone_append_page(struct bio *bio, struct page *page,
        if (WARN_ON_ONCE(bio_op(bio) != REQ_OP_ZONE_APPEND))
                return 0;
 
-       if (WARN_ON_ONCE(!blk_queue_is_zoned(q)))
+       if (WARN_ON_ONCE(!bdev_is_zoned(bio->bi_bdev)))
                return 0;
 
        return bio_add_hw_page(q, bio, page, len, offset,
index 5ad7bd9..6bcca0b 100644 (file)
@@ -569,7 +569,7 @@ static inline blk_status_t blk_check_zone_append(struct request_queue *q,
        int nr_sectors = bio_sectors(bio);
 
        /* Only applicable to zoned block devices */
-       if (!blk_queue_is_zoned(q))
+       if (!bdev_is_zoned(bio->bi_bdev))
                return BLK_STS_NOTSUPP;
 
        /* The bio sector must point to the start of a sequential zone */
@@ -775,11 +775,11 @@ void submit_bio_noacct(struct bio *bio)
        case REQ_OP_ZONE_OPEN:
        case REQ_OP_ZONE_CLOSE:
        case REQ_OP_ZONE_FINISH:
-               if (!blk_queue_is_zoned(q))
+               if (!bdev_is_zoned(bio->bi_bdev))
                        goto not_supported;
                break;
        case REQ_OP_ZONE_RESET_ALL:
-               if (!blk_queue_is_zoned(q) || !blk_queue_zone_resetall(q))
+               if (!bdev_is_zoned(bio->bi_bdev) || !blk_queue_zone_resetall(q))
                        goto not_supported;
                break;
        case REQ_OP_WRITE_ZEROES:
index 54e20ed..31d75a8 100644 (file)
@@ -317,7 +317,7 @@ static inline struct blk_plug *blk_mq_plug(struct request_queue *q,
         * For regular block devices or read operations, use the context plug
         * which may be NULL if blk_start_plug() was not executed.
         */
-       if (!blk_queue_is_zoned(q) || !op_is_write(bio_op(bio)))
+       if (!bdev_is_zoned(bio->bi_bdev) || !op_is_write(bio_op(bio)))
                return current->plug;
 
        /* Zoned block device write operation case: do not plug the BIO */
index 38cd840..90a5c9c 100644 (file)
@@ -149,8 +149,7 @@ int blkdev_report_zones(struct block_device *bdev, sector_t sector,
        struct gendisk *disk = bdev->bd_disk;
        sector_t capacity = get_capacity(disk);
 
-       if (!blk_queue_is_zoned(bdev_get_queue(bdev)) ||
-           WARN_ON_ONCE(!disk->fops->report_zones))
+       if (!bdev_is_zoned(bdev) || WARN_ON_ONCE(!disk->fops->report_zones))
                return -EOPNOTSUPP;
 
        if (!nr_zones || sector >= capacity)
@@ -268,7 +267,7 @@ int blkdev_zone_mgmt(struct block_device *bdev, enum req_opf op,
        struct bio *bio = NULL;
        int ret = 0;
 
-       if (!blk_queue_is_zoned(q))
+       if (!bdev_is_zoned(bdev))
                return -EOPNOTSUPP;
 
        if (bdev_read_only(bdev))
@@ -350,7 +349,7 @@ int blkdev_report_zones_ioctl(struct block_device *bdev, fmode_t mode,
        if (!q)
                return -ENXIO;
 
-       if (!blk_queue_is_zoned(q))
+       if (!bdev_is_zoned(bdev))
                return -ENOTTY;
 
        if (copy_from_user(&rep, argp, sizeof(struct blk_zone_report)))
@@ -408,7 +407,7 @@ int blkdev_zone_mgmt_ioctl(struct block_device *bdev, fmode_t mode,
        if (!q)
                return -ENXIO;
 
-       if (!blk_queue_is_zoned(q))
+       if (!bdev_is_zoned(bdev))
                return -ENOTTY;
 
        if (!(mode & FMODE_WRITE))
index bd539af..b36b528 100644 (file)
@@ -1623,7 +1623,7 @@ static int device_not_matches_zone_sectors(struct dm_target *ti, struct dm_dev *
        struct request_queue *q = bdev_get_queue(dev->bdev);
        unsigned int *zone_sectors = data;
 
-       if (!blk_queue_is_zoned(q))
+       if (!bdev_is_zoned(dev->bdev))
                return 0;
 
        return blk_queue_zone_sectors(q) != *zone_sectors;
index 3e7b1fe..ae616b8 100644 (file)
@@ -270,7 +270,7 @@ static int device_not_zone_append_capable(struct dm_target *ti,
                                          struct dm_dev *dev, sector_t start,
                                          sector_t len, void *data)
 {
-       return !blk_queue_is_zoned(bdev_get_queue(dev->bdev));
+       return !bdev_is_zoned(dev->bdev);
 }
 
 static bool dm_table_supports_zone_append(struct dm_table *t)
index 8872f9c..33d3799 100644 (file)
@@ -1033,7 +1033,7 @@ static void clone_endio(struct bio *bio)
        }
 
        if (static_branch_unlikely(&zoned_enabled) &&
-           unlikely(blk_queue_is_zoned(bdev_get_queue(bio->bi_bdev))))
+           unlikely(bdev_is_zoned(bio->bi_bdev)))
                dm_zone_endio(io, bio);
 
        if (endio) {