Move fields from the device structure into the metadata structure
and provide accessor functions.
Signed-off-by: Hannes Reinecke <hare@suse.de>
Reviewed-by: Damien Le Moal <damien.lemoal@wdc.com>
Reviewed-by: Bob Liu <bob.liu@oracle.com>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
unsigned int zone_nr_bitmap_blocks;
unsigned int zone_bits_per_mblk;
unsigned int zone_nr_bitmap_blocks;
unsigned int zone_bits_per_mblk;
+ sector_t zone_nr_blocks;
+ sector_t zone_nr_blocks_shift;
+
+ sector_t zone_nr_sectors;
+ sector_t zone_nr_sectors_shift;
+
unsigned int nr_bitmap_blocks;
unsigned int nr_map_blocks;
unsigned int nr_bitmap_blocks;
unsigned int nr_map_blocks;
unsigned int nr_useable_zones;
unsigned int nr_meta_blocks;
unsigned int nr_meta_zones;
unsigned int nr_useable_zones;
unsigned int nr_meta_blocks;
unsigned int nr_meta_zones;
*/
sector_t dmz_start_sect(struct dmz_metadata *zmd, struct dm_zone *zone)
{
*/
sector_t dmz_start_sect(struct dmz_metadata *zmd, struct dm_zone *zone)
{
- return (sector_t)zone->id << zmd->dev->zone_nr_sectors_shift;
+ return (sector_t)zone->id << zmd->zone_nr_sectors_shift;
}
sector_t dmz_start_block(struct dmz_metadata *zmd, struct dm_zone *zone)
{
}
sector_t dmz_start_block(struct dmz_metadata *zmd, struct dm_zone *zone)
{
- return (sector_t)zone->id << zmd->dev->zone_nr_blocks_shift;
+ return (sector_t)zone->id << zmd->zone_nr_blocks_shift;
}
struct dmz_dev *dmz_zone_to_dev(struct dmz_metadata *zmd, struct dm_zone *zone)
}
struct dmz_dev *dmz_zone_to_dev(struct dmz_metadata *zmd, struct dm_zone *zone)
+/*
+ * Accessors for the zone geometry values this patch moves from
+ * struct dmz_dev into struct dmz_metadata, so callers no longer
+ * reach through zmd->dev.
+ *
+ * NOTE(review): the backing fields are declared sector_t (see the
+ * struct dmz_metadata additions above) but these accessors return
+ * unsigned int -- possible truncation for very large zones; confirm
+ * this narrowing is intended.
+ */
+unsigned int dmz_zone_nr_blocks(struct dmz_metadata *zmd)
+{
+ return zmd->zone_nr_blocks;
+}
+
+unsigned int dmz_zone_nr_blocks_shift(struct dmz_metadata *zmd)
+{
+ return zmd->zone_nr_blocks_shift;
+}
+
+unsigned int dmz_zone_nr_sectors(struct dmz_metadata *zmd)
+{
+ return zmd->zone_nr_sectors;
+}
+
+unsigned int dmz_zone_nr_sectors_shift(struct dmz_metadata *zmd)
+{
+ return zmd->zone_nr_sectors_shift;
+}
+
unsigned int dmz_nr_zones(struct dmz_metadata *zmd)
{
unsigned int dmz_nr_zones(struct dmz_metadata *zmd)
{
- return zmd->dev->nr_zones;
+ return zmd->nr_zones;
}
unsigned int dmz_nr_chunks(struct dmz_metadata *zmd)
}
unsigned int dmz_nr_chunks(struct dmz_metadata *zmd)
- nr_meta_zones = (le32_to_cpu(sb->nr_meta_blocks) + dev->zone_nr_blocks - 1)
- >> dev->zone_nr_blocks_shift;
+ nr_meta_zones = (le32_to_cpu(sb->nr_meta_blocks) + zmd->zone_nr_blocks - 1)
+ >> zmd->zone_nr_blocks_shift;
if (!nr_meta_zones ||
nr_meta_zones >= zmd->nr_rnd_zones) {
dmz_dev_err(dev, "Invalid number of metadata blocks");
if (!nr_meta_zones ||
nr_meta_zones >= zmd->nr_rnd_zones) {
dmz_dev_err(dev, "Invalid number of metadata blocks");
*/
static int dmz_lookup_secondary_sb(struct dmz_metadata *zmd)
{
*/
static int dmz_lookup_secondary_sb(struct dmz_metadata *zmd)
{
- unsigned int zone_nr_blocks = zmd->dev->zone_nr_blocks;
+ unsigned int zone_nr_blocks = zmd->zone_nr_blocks;
struct dmz_mblock *mblk;
int i;
struct dmz_mblock *mblk;
int i;
struct dmz_dev *dev = zmd->dev;
/* Ignore the eventual last runt (smaller) zone */
struct dmz_dev *dev = zmd->dev;
/* Ignore the eventual last runt (smaller) zone */
- if (blkz->len != dev->zone_nr_sectors) {
+ if (blkz->len != zmd->zone_nr_sectors) {
if (blkz->start + blkz->len == dev->capacity)
return 0;
return -ENXIO;
if (blkz->start + blkz->len == dev->capacity)
return 0;
return -ENXIO;
- zmd->zone_bitmap_size = dev->zone_nr_blocks >> 3;
+ zmd->zone_nr_sectors = dev->zone_nr_sectors;
+ zmd->zone_nr_sectors_shift = ilog2(zmd->zone_nr_sectors);
+ zmd->zone_nr_blocks = dmz_sect2blk(zmd->zone_nr_sectors);
+ zmd->zone_nr_blocks_shift = ilog2(zmd->zone_nr_blocks);
+ zmd->zone_bitmap_size = zmd->zone_nr_blocks >> 3;
zmd->zone_nr_bitmap_blocks =
max_t(sector_t, 1, zmd->zone_bitmap_size >> DMZ_BLOCK_SHIFT);
zmd->zone_nr_bitmap_blocks =
max_t(sector_t, 1, zmd->zone_bitmap_size >> DMZ_BLOCK_SHIFT);
- zmd->zone_bits_per_mblk = min_t(sector_t, dev->zone_nr_blocks,
+ zmd->zone_bits_per_mblk = min_t(sector_t, zmd->zone_nr_blocks,
DMZ_BLOCK_SIZE_BITS);
/* Allocate zone array */
DMZ_BLOCK_SIZE_BITS);
/* Allocate zone array */
- zmd->zones = kcalloc(dev->nr_zones, sizeof(struct dm_zone), GFP_KERNEL);
+ zmd->nr_zones = dev->nr_zones;
+ zmd->zones = kcalloc(zmd->nr_zones, sizeof(struct dm_zone), GFP_KERNEL);
if (!zmd->zones)
return -ENOMEM;
dmz_dev_info(dev, "Using %zu B for zone information",
if (!zmd->zones)
return -ENOMEM;
dmz_dev_info(dev, "Using %zu B for zone information",
- sizeof(struct dm_zone) * dev->nr_zones);
+ sizeof(struct dm_zone) * zmd->nr_zones);
/*
* Get zone information and initialize zone descriptors. At the same
/*
* Get zone information and initialize zone descriptors. At the same
ret = blkdev_zone_mgmt(dev->bdev, REQ_OP_ZONE_RESET,
dmz_start_sect(zmd, zone),
ret = blkdev_zone_mgmt(dev->bdev, REQ_OP_ZONE_RESET,
dmz_start_sect(zmd, zone),
- dev->zone_nr_sectors, GFP_NOIO);
+ zmd->zone_nr_sectors, GFP_NOIO);
if (ret) {
dmz_dev_err(dev, "Reset zone %u failed %d",
zone->id, ret);
if (ret) {
dmz_dev_err(dev, "Reset zone %u failed %d",
zone->id, ret);
if (dzone_id == DMZ_MAP_UNMAPPED)
goto next;
if (dzone_id == DMZ_MAP_UNMAPPED)
goto next;
- if (dzone_id >= dev->nr_zones) {
+ if (dzone_id >= zmd->nr_zones) {
dmz_dev_err(dev, "Chunk %u mapping: invalid data zone ID %u",
chunk, dzone_id);
return -EIO;
dmz_dev_err(dev, "Chunk %u mapping: invalid data zone ID %u",
chunk, dzone_id);
return -EIO;
if (bzone_id == DMZ_MAP_UNMAPPED)
goto next;
if (bzone_id == DMZ_MAP_UNMAPPED)
goto next;
- if (bzone_id >= dev->nr_zones) {
+ if (bzone_id >= zmd->nr_zones) {
dmz_dev_err(dev, "Chunk %u mapping: invalid buffer zone ID %u",
chunk, bzone_id);
return -EIO;
dmz_dev_err(dev, "Chunk %u mapping: invalid buffer zone ID %u",
chunk, bzone_id);
return -EIO;
* fully initialized. All remaining zones are unmapped data
* zones. Finish initializing those here.
*/
* fully initialized. All remaining zones are unmapped data
* zones. Finish initializing those here.
*/
- for (i = 0; i < dev->nr_zones; i++) {
+ for (i = 0; i < zmd->nr_zones; i++) {
dzone = dmz_get(zmd, i);
if (dmz_is_meta(dzone))
continue;
dzone = dmz_get(zmd, i);
if (dmz_is_meta(dzone))
continue;
sector_t chunk_block = 0;
/* Get the zones bitmap blocks */
sector_t chunk_block = 0;
/* Get the zones bitmap blocks */
- while (chunk_block < zmd->dev->zone_nr_blocks) {
+ while (chunk_block < zmd->zone_nr_blocks) {
from_mblk = dmz_get_bitmap(zmd, from_zone, chunk_block);
if (IS_ERR(from_mblk))
return PTR_ERR(from_mblk);
from_mblk = dmz_get_bitmap(zmd, from_zone, chunk_block);
if (IS_ERR(from_mblk))
return PTR_ERR(from_mblk);
int ret;
/* Get the zones bitmap blocks */
int ret;
/* Get the zones bitmap blocks */
- while (chunk_block < zmd->dev->zone_nr_blocks) {
+ while (chunk_block < zmd->zone_nr_blocks) {
/* Get a valid region from the source zone */
ret = dmz_first_valid_block(zmd, from_zone, &chunk_block);
if (ret <= 0)
/* Get a valid region from the source zone */
ret = dmz_first_valid_block(zmd, from_zone, &chunk_block);
if (ret <= 0)
sector_t chunk_block, unsigned int nr_blocks)
{
unsigned int count, bit, nr_bits;
sector_t chunk_block, unsigned int nr_blocks)
{
unsigned int count, bit, nr_bits;
- unsigned int zone_nr_blocks = zmd->dev->zone_nr_blocks;
+ unsigned int zone_nr_blocks = zmd->zone_nr_blocks;
struct dmz_mblock *mblk;
unsigned int n = 0;
struct dmz_mblock *mblk;
unsigned int n = 0;
dmz_dev_debug(zmd->dev, "=> INVALIDATE zone %u, block %llu, %u blocks",
zone->id, (u64)chunk_block, nr_blocks);
dmz_dev_debug(zmd->dev, "=> INVALIDATE zone %u, block %llu, %u blocks",
zone->id, (u64)chunk_block, nr_blocks);
- WARN_ON(chunk_block + nr_blocks > zmd->dev->zone_nr_blocks);
+ WARN_ON(chunk_block + nr_blocks > zmd->zone_nr_blocks);
while (nr_blocks) {
/* Get bitmap block */
while (nr_blocks) {
/* Get bitmap block */
struct dmz_mblock *mblk;
int ret;
struct dmz_mblock *mblk;
int ret;
- WARN_ON(chunk_block >= zmd->dev->zone_nr_blocks);
+ WARN_ON(chunk_block >= zmd->zone_nr_blocks);
/* Get bitmap block */
mblk = dmz_get_bitmap(zmd, zone, chunk_block);
/* Get bitmap block */
mblk = dmz_get_bitmap(zmd, zone, chunk_block);
unsigned long *bitmap;
int n = 0;
unsigned long *bitmap;
int n = 0;
- WARN_ON(chunk_block + nr_blocks > zmd->dev->zone_nr_blocks);
+ WARN_ON(chunk_block + nr_blocks > zmd->zone_nr_blocks);
while (nr_blocks) {
/* Get bitmap block */
while (nr_blocks) {
/* Get bitmap block */
/* The block is valid: get the number of valid blocks from block */
return dmz_to_next_set_block(zmd, zone, chunk_block,
/* The block is valid: get the number of valid blocks from block */
return dmz_to_next_set_block(zmd, zone, chunk_block,
- zmd->dev->zone_nr_blocks - chunk_block, 0);
+ zmd->zone_nr_blocks - chunk_block, 0);
int ret;
ret = dmz_to_next_set_block(zmd, zone, start_block,
int ret;
ret = dmz_to_next_set_block(zmd, zone, start_block,
- zmd->dev->zone_nr_blocks - start_block, 1);
+ zmd->zone_nr_blocks - start_block, 1);
*chunk_block = start_block;
return dmz_to_next_set_block(zmd, zone, start_block,
*chunk_block = start_block;
return dmz_to_next_set_block(zmd, zone, start_block,
- zmd->dev->zone_nr_blocks - start_block, 0);
+ zmd->zone_nr_blocks - start_block, 0);
struct dmz_mblock *mblk;
sector_t chunk_block = 0;
unsigned int bit, nr_bits;
struct dmz_mblock *mblk;
sector_t chunk_block = 0;
unsigned int bit, nr_bits;
- unsigned int nr_blocks = zmd->dev->zone_nr_blocks;
+ unsigned int nr_blocks = zmd->zone_nr_blocks;
dmz_dev_info(dev, " %llu 512-byte logical sectors",
(u64)dev->capacity);
dmz_dev_info(dev, " %u zones of %llu 512-byte logical sectors",
dmz_dev_info(dev, " %llu 512-byte logical sectors",
(u64)dev->capacity);
dmz_dev_info(dev, " %u zones of %llu 512-byte logical sectors",
- dev->nr_zones, (u64)dev->zone_nr_sectors);
+ zmd->nr_zones, (u64)zmd->zone_nr_sectors);
dmz_dev_info(dev, " %u metadata zones",
zmd->nr_meta_zones * 2);
dmz_dev_info(dev, " %u data zones for %u chunks",
dmz_dev_info(dev, " %u metadata zones",
zmd->nr_meta_zones * 2);
dmz_dev_info(dev, " %u data zones for %u chunks",
int ret;
/* Check zones */
int ret;
/* Check zones */
- for (i = 0; i < dev->nr_zones; i++) {
+ for (i = 0; i < zmd->nr_zones; i++) {
zone = dmz_get(zmd, i);
if (!zone) {
dmz_dev_err(dev, "Unable to get zone %u", i);
zone = dmz_get(zmd, i);
if (!zone) {
dmz_dev_err(dev, "Unable to get zone %u", i);
i, (u64)zone->wp_block, (u64)wp_block);
zone->wp_block = wp_block;
dmz_invalidate_blocks(zmd, zone, zone->wp_block,
i, (u64)zone->wp_block, (u64)wp_block);
zone->wp_block = wp_block;
dmz_invalidate_blocks(zmd, zone, zone->wp_block,
- dev->zone_nr_blocks - zone->wp_block);
+ zmd->zone_nr_blocks - zone->wp_block);
if (dmz_is_seq(src_zone))
end_block = src_zone->wp_block;
else
if (dmz_is_seq(src_zone))
end_block = src_zone->wp_block;
else
- end_block = dev->zone_nr_blocks;
+ end_block = dmz_zone_nr_blocks(zmd);
src_zone_block = dmz_start_block(zmd, src_zone);
dst_zone_block = dmz_start_block(zmd, dst_zone);
src_zone_block = dmz_start_block(zmd, src_zone);
dst_zone_block = dmz_start_block(zmd, dst_zone);
ret = dmz_merge_valid_blocks(zmd, bzone, dzone, chunk_block);
if (ret == 0) {
/* Free the buffer zone */
ret = dmz_merge_valid_blocks(zmd, bzone, dzone, chunk_block);
if (ret == 0) {
/* Free the buffer zone */
- dmz_invalidate_blocks(zmd, bzone, 0, zrc->dev->zone_nr_blocks);
+ dmz_invalidate_blocks(zmd, bzone, 0, dmz_zone_nr_blocks(zmd));
dmz_lock_map(zmd);
dmz_unmap_zone(zmd, bzone);
dmz_unlock_zone_reclaim(dzone);
dmz_lock_map(zmd);
dmz_unmap_zone(zmd, bzone);
dmz_unlock_zone_reclaim(dzone);
* Free the data zone and remap the chunk to
* the buffer zone.
*/
* Free the data zone and remap the chunk to
* the buffer zone.
*/
- dmz_invalidate_blocks(zmd, dzone, 0, zrc->dev->zone_nr_blocks);
+ dmz_invalidate_blocks(zmd, dzone, 0, dmz_zone_nr_blocks(zmd));
dmz_lock_map(zmd);
dmz_unmap_zone(zmd, bzone);
dmz_unmap_zone(zmd, dzone);
dmz_lock_map(zmd);
dmz_unmap_zone(zmd, bzone);
dmz_unmap_zone(zmd, dzone);
dmz_unlock_map(zmd);
} else {
/* Free the data zone and remap the chunk */
dmz_unlock_map(zmd);
} else {
/* Free the data zone and remap the chunk */
- dmz_invalidate_blocks(zmd, dzone, 0, zrc->dev->zone_nr_blocks);
+ dmz_invalidate_blocks(zmd, dzone, 0, dmz_zone_nr_blocks(zmd));
dmz_lock_map(zmd);
dmz_unmap_zone(zmd, dzone);
dmz_unlock_zone_reclaim(dzone);
dmz_lock_map(zmd);
dmz_unmap_zone(zmd, dzone);
dmz_unlock_zone_reclaim(dzone);
static int dmz_handle_read(struct dmz_target *dmz, struct dm_zone *zone,
struct bio *bio)
{
static int dmz_handle_read(struct dmz_target *dmz, struct dm_zone *zone,
struct bio *bio)
{
- sector_t chunk_block = dmz_chunk_block(dmz->dev, dmz_bio_block(bio));
+ struct dmz_metadata *zmd = dmz->metadata;
+ sector_t chunk_block = dmz_chunk_block(zmd, dmz_bio_block(bio));
unsigned int nr_blocks = dmz_bio_blocks(bio);
sector_t end_block = chunk_block + nr_blocks;
struct dm_zone *rzone, *bzone;
unsigned int nr_blocks = dmz_bio_blocks(bio);
sector_t end_block = chunk_block + nr_blocks;
struct dm_zone *rzone, *bzone;
}
dmz_dev_debug(dmz->dev, "READ chunk %llu -> %s zone %u, block %llu, %u blocks",
}
dmz_dev_debug(dmz->dev, "READ chunk %llu -> %s zone %u, block %llu, %u blocks",
- (unsigned long long)dmz_bio_chunk(dmz->dev, bio),
+ (unsigned long long)dmz_bio_chunk(zmd, bio),
(dmz_is_rnd(zone) ? "RND" : "SEQ"),
zone->id,
(unsigned long long)chunk_block, nr_blocks);
(dmz_is_rnd(zone) ? "RND" : "SEQ"),
zone->id,
(unsigned long long)chunk_block, nr_blocks);
nr_blocks = 0;
if (dmz_is_rnd(zone) || chunk_block < zone->wp_block) {
/* Test block validity in the data zone */
nr_blocks = 0;
if (dmz_is_rnd(zone) || chunk_block < zone->wp_block) {
/* Test block validity in the data zone */
- ret = dmz_block_valid(dmz->metadata, zone, chunk_block);
+ ret = dmz_block_valid(zmd, zone, chunk_block);
if (ret < 0)
return ret;
if (ret > 0) {
if (ret < 0)
return ret;
if (ret > 0) {
* Check the buffer zone, if there is one.
*/
if (!nr_blocks && bzone) {
* Check the buffer zone, if there is one.
*/
if (!nr_blocks && bzone) {
- ret = dmz_block_valid(dmz->metadata, bzone, chunk_block);
+ ret = dmz_block_valid(zmd, bzone, chunk_block);
if (ret < 0)
return ret;
if (ret > 0) {
if (ret < 0)
return ret;
if (ret > 0) {
static int dmz_handle_write(struct dmz_target *dmz, struct dm_zone *zone,
struct bio *bio)
{
static int dmz_handle_write(struct dmz_target *dmz, struct dm_zone *zone,
struct bio *bio)
{
- sector_t chunk_block = dmz_chunk_block(dmz->dev, dmz_bio_block(bio));
+ struct dmz_metadata *zmd = dmz->metadata;
+ sector_t chunk_block = dmz_chunk_block(zmd, dmz_bio_block(bio));
unsigned int nr_blocks = dmz_bio_blocks(bio);
if (!zone)
return -ENOSPC;
dmz_dev_debug(dmz->dev, "WRITE chunk %llu -> %s zone %u, block %llu, %u blocks",
unsigned int nr_blocks = dmz_bio_blocks(bio);
if (!zone)
return -ENOSPC;
dmz_dev_debug(dmz->dev, "WRITE chunk %llu -> %s zone %u, block %llu, %u blocks",
- (unsigned long long)dmz_bio_chunk(dmz->dev, bio),
+ (unsigned long long)dmz_bio_chunk(zmd, bio),
(dmz_is_rnd(zone) ? "RND" : "SEQ"),
zone->id,
(unsigned long long)chunk_block, nr_blocks);
(dmz_is_rnd(zone) ? "RND" : "SEQ"),
zone->id,
(unsigned long long)chunk_block, nr_blocks);
struct dmz_metadata *zmd = dmz->metadata;
sector_t block = dmz_bio_block(bio);
unsigned int nr_blocks = dmz_bio_blocks(bio);
struct dmz_metadata *zmd = dmz->metadata;
sector_t block = dmz_bio_block(bio);
unsigned int nr_blocks = dmz_bio_blocks(bio);
- sector_t chunk_block = dmz_chunk_block(dmz->dev, block);
+ sector_t chunk_block = dmz_chunk_block(zmd, block);
int ret = 0;
/* For unmapped chunks, there is nothing to do */
int ret = 0;
/* For unmapped chunks, there is nothing to do */
return -EROFS;
dmz_dev_debug(dmz->dev, "DISCARD chunk %llu -> zone %u, block %llu, %u blocks",
return -EROFS;
dmz_dev_debug(dmz->dev, "DISCARD chunk %llu -> zone %u, block %llu, %u blocks",
- (unsigned long long)dmz_bio_chunk(dmz->dev, bio),
+ (unsigned long long)dmz_bio_chunk(zmd, bio),
zone->id,
(unsigned long long)chunk_block, nr_blocks);
zone->id,
(unsigned long long)chunk_block, nr_blocks);
* mapping for read and discard. If a mapping is obtained,
* the zone returned will be set to active state.
*/
* mapping for read and discard. If a mapping is obtained,
* the zone returned will be set to active state.
*/
- zone = dmz_get_chunk_mapping(zmd, dmz_bio_chunk(dmz->dev, bio),
+ zone = dmz_get_chunk_mapping(zmd, dmz_bio_chunk(zmd, bio),
bio_op(bio));
if (IS_ERR(zone)) {
ret = PTR_ERR(zone);
bio_op(bio));
if (IS_ERR(zone)) {
ret = PTR_ERR(zone);
*/
static int dmz_queue_chunk_work(struct dmz_target *dmz, struct bio *bio)
{
*/
static int dmz_queue_chunk_work(struct dmz_target *dmz, struct bio *bio)
{
- unsigned int chunk = dmz_bio_chunk(dmz->dev, bio);
+ unsigned int chunk = dmz_bio_chunk(dmz->metadata, bio);
struct dm_chunk_work *cw;
int ret = 0;
struct dm_chunk_work *cw;
int ret = 0;
static int dmz_map(struct dm_target *ti, struct bio *bio)
{
struct dmz_target *dmz = ti->private;
static int dmz_map(struct dm_target *ti, struct bio *bio)
{
struct dmz_target *dmz = ti->private;
+ struct dmz_metadata *zmd = dmz->metadata;
struct dmz_dev *dev = dmz->dev;
struct dmz_bioctx *bioctx = dm_per_bio_data(bio, sizeof(struct dmz_bioctx));
sector_t sector = bio->bi_iter.bi_sector;
struct dmz_dev *dev = dmz->dev;
struct dmz_bioctx *bioctx = dm_per_bio_data(bio, sizeof(struct dmz_bioctx));
sector_t sector = bio->bi_iter.bi_sector;
dmz_dev_debug(dev, "BIO op %d sector %llu + %u => chunk %llu, block %llu, %u blocks",
bio_op(bio), (unsigned long long)sector, nr_sectors,
dmz_dev_debug(dev, "BIO op %d sector %llu + %u => chunk %llu, block %llu, %u blocks",
bio_op(bio), (unsigned long long)sector, nr_sectors,
- (unsigned long long)dmz_bio_chunk(dmz->dev, bio),
- (unsigned long long)dmz_chunk_block(dmz->dev, dmz_bio_block(bio)),
+ (unsigned long long)dmz_bio_chunk(zmd, bio),
+ (unsigned long long)dmz_chunk_block(zmd, dmz_bio_block(bio)),
(unsigned int)dmz_bio_blocks(bio));
bio_set_dev(bio, dev->bdev);
(unsigned int)dmz_bio_blocks(bio));
bio_set_dev(bio, dev->bdev);
}
/* Split zone BIOs to fit entirely into a zone */
}
/* Split zone BIOs to fit entirely into a zone */
- chunk_sector = sector & (dev->zone_nr_sectors - 1);
- if (chunk_sector + nr_sectors > dev->zone_nr_sectors)
- dm_accept_partial_bio(bio, dev->zone_nr_sectors - chunk_sector);
+ chunk_sector = sector & (dmz_zone_nr_sectors(zmd) - 1);
+ if (chunk_sector + nr_sectors > dmz_zone_nr_sectors(zmd))
+ dm_accept_partial_bio(bio, dmz_zone_nr_sectors(zmd) - chunk_sector);
/* Now ready to handle this BIO */
ret = dmz_queue_chunk_work(dmz, bio);
if (ret) {
dmz_dev_debug(dmz->dev,
"BIO op %d, can't process chunk %llu, err %i\n",
/* Now ready to handle this BIO */
ret = dmz_queue_chunk_work(dmz, bio);
if (ret) {
dmz_dev_debug(dmz->dev,
"BIO op %d, can't process chunk %llu, err %i\n",
- bio_op(bio), (u64)dmz_bio_chunk(dmz->dev, bio),
+ bio_op(bio), (u64)dmz_bio_chunk(zmd, bio),
ret);
return DM_MAPIO_REQUEUE;
}
ret);
return DM_MAPIO_REQUEUE;
}
}
dev->zone_nr_sectors = blk_queue_zone_sectors(q);
}
dev->zone_nr_sectors = blk_queue_zone_sectors(q);
- dev->zone_nr_sectors_shift = ilog2(dev->zone_nr_sectors);
-
- dev->zone_nr_blocks = dmz_sect2blk(dev->zone_nr_sectors);
- dev->zone_nr_blocks_shift = ilog2(dev->zone_nr_blocks);
dev->nr_zones = blkdev_nr_zones(dev->bdev->bd_disk);
dev->nr_zones = blkdev_nr_zones(dev->bdev->bd_disk);
}
/* Set target (no write same support) */
}
/* Set target (no write same support) */
- ti->max_io_len = dev->zone_nr_sectors << 9;
+ ti->max_io_len = dmz_zone_nr_sectors(dmz->metadata) << 9;
ti->num_flush_bios = 1;
ti->num_discard_bios = 1;
ti->num_write_zeroes_bios = 1;
ti->num_flush_bios = 1;
ti->num_discard_bios = 1;
ti->num_write_zeroes_bios = 1;
ti->discards_supported = true;
/* The exposed capacity is the number of chunks that can be mapped */
ti->discards_supported = true;
/* The exposed capacity is the number of chunks that can be mapped */
- ti->len = (sector_t)dmz_nr_chunks(dmz->metadata) << dev->zone_nr_sectors_shift;
+ ti->len = (sector_t)dmz_nr_chunks(dmz->metadata) <<
+ dmz_zone_nr_sectors_shift(dmz->metadata);
/* Zone BIO */
ret = bioset_init(&dmz->bio_set, DMZ_MIN_BIOS, 0, 0);
/* Zone BIO */
ret = bioset_init(&dmz->bio_set, DMZ_MIN_BIOS, 0, 0);
static void dmz_io_hints(struct dm_target *ti, struct queue_limits *limits)
{
struct dmz_target *dmz = ti->private;
static void dmz_io_hints(struct dm_target *ti, struct queue_limits *limits)
{
struct dmz_target *dmz = ti->private;
- unsigned int chunk_sectors = dmz->dev->zone_nr_sectors;
+ unsigned int chunk_sectors = dmz_zone_nr_sectors(dmz->metadata);
limits->logical_block_size = DMZ_BLOCK_SIZE;
limits->physical_block_size = DMZ_BLOCK_SIZE;
limits->logical_block_size = DMZ_BLOCK_SIZE;
limits->physical_block_size = DMZ_BLOCK_SIZE;
{
struct dmz_target *dmz = ti->private;
struct dmz_dev *dev = dmz->dev;
{
struct dmz_target *dmz = ti->private;
struct dmz_dev *dev = dmz->dev;
- sector_t capacity = dev->capacity & ~(dev->zone_nr_sectors - 1);
+ sector_t capacity = dev->capacity & ~(dmz_zone_nr_sectors(dmz->metadata) - 1);
return fn(ti, dmz->ddev, 0, capacity, data);
}
return fn(ti, dmz->ddev, 0, capacity, data);
}
unsigned int flags;
sector_t zone_nr_sectors;
unsigned int flags;
sector_t zone_nr_sectors;
- unsigned int zone_nr_sectors_shift;
-
- sector_t zone_nr_blocks;
- sector_t zone_nr_blocks_shift;
-#define dmz_bio_chunk(dev, bio) ((bio)->bi_iter.bi_sector >> \
- (dev)->zone_nr_sectors_shift)
-#define dmz_chunk_block(dev, b) ((b) & ((dev)->zone_nr_blocks - 1))
+#define dmz_bio_chunk(zmd, bio) ((bio)->bi_iter.bi_sector >> \
+ dmz_zone_nr_sectors_shift(zmd))
+#define dmz_chunk_block(zmd, b) ((b) & (dmz_zone_nr_blocks(zmd) - 1))
/* Device flags. */
#define DMZ_BDEV_DYING (1 << 0)
/* Device flags. */
#define DMZ_BDEV_DYING (1 << 0)
unsigned int dmz_nr_unmap_rnd_zones(struct dmz_metadata *zmd);
unsigned int dmz_nr_seq_zones(struct dmz_metadata *zmd);
unsigned int dmz_nr_unmap_seq_zones(struct dmz_metadata *zmd);
unsigned int dmz_nr_unmap_rnd_zones(struct dmz_metadata *zmd);
unsigned int dmz_nr_seq_zones(struct dmz_metadata *zmd);
unsigned int dmz_nr_unmap_seq_zones(struct dmz_metadata *zmd);
+unsigned int dmz_zone_nr_blocks(struct dmz_metadata *zmd);
+unsigned int dmz_zone_nr_blocks_shift(struct dmz_metadata *zmd);
+unsigned int dmz_zone_nr_sectors(struct dmz_metadata *zmd);
+unsigned int dmz_zone_nr_sectors_shift(struct dmz_metadata *zmd);
/*
* Activate a zone (increment its reference count).
/*
* Activate a zone (increment its reference count).