// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2017 Western Digital Corporation or its affiliates.
 *
 * This file is released under the GPL.
 */

#include "dm-zoned.h"

#include <linux/module.h>

#define DM_MSG_PREFIX		"zoned"

#define DMZ_MIN_BIOS		8192
/*
 * Zone BIO context.
 */
struct dmz_bioctx {
	struct dmz_dev		*dev;
	struct dm_zone		*zone;
	struct bio		*bio;
	refcount_t		ref;
};

/*
 * Chunk work descriptor.
 */
struct dm_chunk_work {
	struct work_struct	work;
	refcount_t		refcount;
	struct dmz_target	*target;
	unsigned int		chunk;
	struct bio_list		bio_list;
};
/*
 * Target descriptor.
 */
struct dmz_target {
	struct dm_dev		**ddev;
	unsigned int		nr_ddevs;

	unsigned int		flags;

	/* Zoned block device information */
	struct dmz_dev		*dev;

	/* For metadata handling */
	struct dmz_metadata	*metadata;

	/* For chunk work */
	struct radix_tree_root	chunk_rxtree;
	struct workqueue_struct *chunk_wq;
	struct mutex		chunk_lock;

	/* For cloned BIOs to zones */
	struct bio_set		bio_set;

	/* For flush */
	spinlock_t		flush_lock;
	struct bio_list		flush_list;
	struct delayed_work	flush_work;
	struct workqueue_struct *flush_wq;
};
/*
 * Flush interval (10 seconds, expressed in jiffies).
 */
#define DMZ_FLUSH_PERIOD	(10 * HZ)
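
/*
 * Note: besides this periodic flush, an empty REQ_OP_WRITE (flush) BIO
 * queued in dmz_map() kicks dmz_flush_work() immediately via
 * mod_delayed_work(..., 0), so explicit flushes do not wait out the period.
 */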
/*
 * Target BIO completion.
 */
static inline void dmz_bio_endio(struct bio *bio, blk_status_t status)
{
	struct dmz_bioctx *bioctx =
		dm_per_bio_data(bio, sizeof(struct dmz_bioctx));

	if (status != BLK_STS_OK && bio->bi_status == BLK_STS_OK)
		bio->bi_status = status;
	if (bioctx->dev && bio->bi_status != BLK_STS_OK)
		bioctx->dev->flags |= DMZ_CHECK_BDEV;

	if (refcount_dec_and_test(&bioctx->ref)) {
		struct dm_zone *zone = bioctx->zone;

		if (zone) {
			if (bio->bi_status != BLK_STS_OK &&
			    bio_op(bio) == REQ_OP_WRITE &&
			    dmz_is_seq(zone))
				set_bit(DMZ_SEQ_WRITE_ERR, &zone->flags);
			dmz_deactivate_zone(zone);
		}
		bio_endio(bio);
	}
}
/*
 * Completion callback for an internally cloned target BIO. This terminates the
 * target BIO when there are no more references to its context.
 */
static void dmz_clone_endio(struct bio *clone)
{
	struct dmz_bioctx *bioctx = clone->bi_private;
	blk_status_t status = clone->bi_status;

	bio_put(clone);
	dmz_bio_endio(bioctx->bio, status);
}
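
/*
 * Reference counting scheme: dmz_map() initializes bioctx->ref to 1 and
 * dmz_submit_bio() takes an extra reference for each clone it issues. The
 * original target BIO therefore completes only after dmz_handle_bio() drops
 * the initial reference and every clone has passed through dmz_clone_endio().
 */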
/*
 * Issue a clone of a target BIO. The clone may only partially process the
 * original target BIO.
 */
static int dmz_submit_bio(struct dmz_target *dmz, struct dm_zone *zone,
			  struct bio *bio, sector_t chunk_block,
			  unsigned int nr_blocks)
{
	struct dmz_bioctx *bioctx =
		dm_per_bio_data(bio, sizeof(struct dmz_bioctx));
	struct dmz_dev *dev = zone->dev;
	struct bio *clone;

	if (dev->flags & DMZ_BDEV_DYING)
		return -EIO;

	clone = bio_alloc_clone(dev->bdev, bio, GFP_NOIO, &dmz->bio_set);
	if (!clone)
		return -ENOMEM;

	bioctx->dev = dev;

	/* Setup the clone */
	clone->bi_iter.bi_sector =
		dmz_start_sect(dmz->metadata, zone) + dmz_blk2sect(chunk_block);
	clone->bi_iter.bi_size = dmz_blk2sect(nr_blocks) << SECTOR_SHIFT;
	clone->bi_end_io = dmz_clone_endio;
	clone->bi_private = bioctx;

	bio_advance(bio, clone->bi_iter.bi_size);

	refcount_inc(&bioctx->ref);
	submit_bio_noacct(clone);

	if (bio_op(bio) == REQ_OP_WRITE && dmz_is_seq(zone))
		zone->wp_block += nr_blocks;

	return 0;
}
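
/*
 * The write pointer of a sequential zone is tracked in software here
 * (zone->wp_block) rather than re-queried from the device: writes to a
 * sequential zone are only ever issued at its write pointer, so advancing
 * the cached value by nr_blocks on submission keeps it accurate.
 */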
/*
 * Zero out pages of discarded blocks accessed by a read BIO.
 */
static void dmz_handle_read_zero(struct dmz_target *dmz, struct bio *bio,
				 sector_t chunk_block, unsigned int nr_blocks)
{
	unsigned int size = nr_blocks << DMZ_BLOCK_SHIFT;

	/* Clear nr_blocks: temporarily shrink the BIO to the span to zero */
	swap(bio->bi_iter.bi_size, size);
	zero_fill_bio(bio);
	swap(bio->bi_iter.bi_size, size);

	bio_advance(bio, size);
}
/*
 * Process a read BIO.
 */
static int dmz_handle_read(struct dmz_target *dmz, struct dm_zone *zone,
			   struct bio *bio)
{
	struct dmz_metadata *zmd = dmz->metadata;
	sector_t chunk_block = dmz_chunk_block(zmd, dmz_bio_block(bio));
	unsigned int nr_blocks = dmz_bio_blocks(bio);
	sector_t end_block = chunk_block + nr_blocks;
	struct dm_zone *rzone, *bzone;
	int ret;

	/* Read into unmapped chunks need only zeroing the BIO buffer */
	if (!zone) {
		zero_fill_bio(bio);
		return 0;
	}

	DMDEBUG("(%s): READ chunk %llu -> %s zone %u, block %llu, %u blocks",
		dmz_metadata_label(zmd),
		(unsigned long long)dmz_bio_chunk(zmd, bio),
		(dmz_is_rnd(zone) ? "RND" :
		 (dmz_is_cache(zone) ? "CACHE" : "SEQ")),
		zone->id,
		(unsigned long long)chunk_block, nr_blocks);

	/* Check block validity to determine the read location */
	bzone = zone->bzone;
	while (chunk_block < end_block) {
		nr_blocks = 0;
		if (dmz_is_rnd(zone) || dmz_is_cache(zone) ||
		    chunk_block < zone->wp_block) {
			/* Test block validity in the data zone */
			ret = dmz_block_valid(zmd, zone, chunk_block);
			if (ret < 0)
				return ret;
			if (ret > 0) {
				/* Read data zone blocks */
				nr_blocks = ret;
				rzone = zone;
			}
		}

		/*
		 * No valid blocks found in the data zone.
		 * Check the buffer zone, if there is one.
		 */
		if (!nr_blocks && bzone) {
			ret = dmz_block_valid(zmd, bzone, chunk_block);
			if (ret < 0)
				return ret;
			if (ret > 0) {
				/* Read buffer zone blocks */
				nr_blocks = ret;
				rzone = bzone;
			}
		}

		if (nr_blocks) {
			/* Valid blocks found: read them */
			nr_blocks = min_t(unsigned int, nr_blocks,
					  end_block - chunk_block);
			ret = dmz_submit_bio(dmz, rzone, bio,
					     chunk_block, nr_blocks);
			if (ret)
				return ret;
			chunk_block += nr_blocks;
		} else {
			/* No valid block: zeroout the current BIO block */
			dmz_handle_read_zero(dmz, bio, chunk_block, 1);
			chunk_block++;
		}
	}

	return 0;
}
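
/*
 * Example (illustrative): for an 8-block read where blocks 0-3 are valid in
 * the data zone, blocks 4-5 are valid only in the buffer zone, and blocks
 * 6-7 were never written, the loop above issues one clone to the data zone,
 * one to the buffer zone, and zero-fills the last two blocks one at a time.
 */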
/*
 * Write blocks directly in a data zone, at the write pointer.
 * If a buffer zone is assigned, invalidate the blocks written
 * in place.
 */
static int dmz_handle_direct_write(struct dmz_target *dmz,
				   struct dm_zone *zone, struct bio *bio,
				   sector_t chunk_block,
				   unsigned int nr_blocks)
{
	struct dmz_metadata *zmd = dmz->metadata;
	struct dm_zone *bzone = zone->bzone;
	int ret;

	if (dmz_is_readonly(zone))
		return -EROFS;

	/* Submit write */
	ret = dmz_submit_bio(dmz, zone, bio, chunk_block, nr_blocks);
	if (ret)
		return ret;

	/*
	 * Validate the blocks in the data zone and invalidate
	 * in the buffer zone, if there is one.
	 */
	ret = dmz_validate_blocks(zmd, zone, chunk_block, nr_blocks);
	if (ret == 0 && bzone)
		ret = dmz_invalidate_blocks(zmd, bzone, chunk_block, nr_blocks);

	return ret;
}
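
/*
 * The ordering matters: the new copy is marked valid in the data zone before
 * the stale copy in the buffer zone is invalidated, so a previously written
 * block is never left without a valid location in the metadata.
 */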
/*
 * Write blocks in the buffer zone of @zone.
 * If no buffer zone is assigned yet, get one.
 * Called with @zone write locked.
 */
static int dmz_handle_buffered_write(struct dmz_target *dmz,
				     struct dm_zone *zone, struct bio *bio,
				     sector_t chunk_block,
				     unsigned int nr_blocks)
{
	struct dmz_metadata *zmd = dmz->metadata;
	struct dm_zone *bzone;
	int ret;

	/* Get the buffer zone. One will be allocated if needed */
	bzone = dmz_get_chunk_buffer(zmd, zone);
	if (IS_ERR(bzone))
		return PTR_ERR(bzone);

	if (dmz_is_readonly(bzone))
		return -EROFS;

	/* Submit write */
	ret = dmz_submit_bio(dmz, bzone, bio, chunk_block, nr_blocks);
	if (ret)
		return ret;

	/*
	 * Validate the blocks in the buffer zone
	 * and invalidate in the data zone.
	 */
	ret = dmz_validate_blocks(zmd, bzone, chunk_block, nr_blocks);
	if (ret == 0 && chunk_block < zone->wp_block)
		ret = dmz_invalidate_blocks(zmd, zone, chunk_block, nr_blocks);

	return ret;
}
/*
 * Process a write BIO.
 */
static int dmz_handle_write(struct dmz_target *dmz, struct dm_zone *zone,
			    struct bio *bio)
{
	struct dmz_metadata *zmd = dmz->metadata;
	sector_t chunk_block = dmz_chunk_block(zmd, dmz_bio_block(bio));
	unsigned int nr_blocks = dmz_bio_blocks(bio);

	if (!zone)
		return -ENOSPC;

	DMDEBUG("(%s): WRITE chunk %llu -> %s zone %u, block %llu, %u blocks",
		dmz_metadata_label(zmd),
		(unsigned long long)dmz_bio_chunk(zmd, bio),
		(dmz_is_rnd(zone) ? "RND" :
		 (dmz_is_cache(zone) ? "CACHE" : "SEQ")),
		zone->id,
		(unsigned long long)chunk_block, nr_blocks);

	if (dmz_is_rnd(zone) || dmz_is_cache(zone) ||
	    chunk_block == zone->wp_block) {
		/*
		 * zone is a random zone or it is a sequential zone
		 * and the BIO is aligned to the zone write pointer:
		 * direct write the zone.
		 */
		return dmz_handle_direct_write(dmz, zone, bio,
					       chunk_block, nr_blocks);
	}

	/*
	 * This is an unaligned write in a sequential zone:
	 * use buffered write.
	 */
	return dmz_handle_buffered_write(dmz, zone, bio, chunk_block, nr_blocks);
}
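
/*
 * Example (illustrative): with the write pointer at block 100 of a
 * sequential zone, a write to chunk block 100 goes straight to the zone,
 * while a write to chunk block 50 (behind the write pointer) is redirected
 * to a conventional buffer zone and later merged back by reclaim.
 */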
/*
 * Process a discard BIO.
 */
static int dmz_handle_discard(struct dmz_target *dmz, struct dm_zone *zone,
			      struct bio *bio)
{
	struct dmz_metadata *zmd = dmz->metadata;
	sector_t block = dmz_bio_block(bio);
	unsigned int nr_blocks = dmz_bio_blocks(bio);
	sector_t chunk_block = dmz_chunk_block(zmd, block);
	int ret = 0;

	/* For unmapped chunks, there is nothing to do */
	if (!zone)
		return 0;

	if (dmz_is_readonly(zone))
		return -EROFS;

	DMDEBUG("(%s): DISCARD chunk %llu -> zone %u, block %llu, %u blocks",
		dmz_metadata_label(dmz->metadata),
		(unsigned long long)dmz_bio_chunk(zmd, bio),
		zone->id,
		(unsigned long long)chunk_block, nr_blocks);

	/*
	 * Invalidate blocks in the data zone and its
	 * buffer zone if one is mapped.
	 */
	if (dmz_is_rnd(zone) || dmz_is_cache(zone) ||
	    chunk_block < zone->wp_block)
		ret = dmz_invalidate_blocks(zmd, zone, chunk_block, nr_blocks);
	if (ret == 0 && zone->bzone)
		ret = dmz_invalidate_blocks(zmd, zone->bzone,
					    chunk_block, nr_blocks);
	return ret;
}
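
/*
 * Discards never touch the device itself: they only clear validity bits in
 * the metadata. For a sequential data zone, blocks at or beyond the write
 * pointer were never written, so only blocks below it need invalidating.
 */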
/*
 * Process a BIO.
 */
static void dmz_handle_bio(struct dmz_target *dmz, struct dm_chunk_work *cw,
			   struct bio *bio)
{
	struct dmz_bioctx *bioctx =
		dm_per_bio_data(bio, sizeof(struct dmz_bioctx));
	struct dmz_metadata *zmd = dmz->metadata;
	struct dm_zone *zone;
	int ret;

	dmz_lock_metadata(zmd);

	/*
	 * Get the data zone mapping the chunk. There may be no
	 * mapping for read and discard. If a mapping is obtained,
	 * the zone returned will be set to active state.
	 */
	zone = dmz_get_chunk_mapping(zmd, dmz_bio_chunk(zmd, bio),
				     bio_op(bio));
	if (IS_ERR(zone)) {
		ret = PTR_ERR(zone);
		goto out;
	}

	/* Process the BIO */
	if (zone) {
		dmz_activate_zone(zone);
		bioctx->zone = zone;
		dmz_reclaim_bio_acc(zone->dev->reclaim);
	}

	switch (bio_op(bio)) {
	case REQ_OP_READ:
		ret = dmz_handle_read(dmz, zone, bio);
		break;
	case REQ_OP_WRITE:
		ret = dmz_handle_write(dmz, zone, bio);
		break;
	case REQ_OP_DISCARD:
	case REQ_OP_WRITE_ZEROES:
		ret = dmz_handle_discard(dmz, zone, bio);
		break;
	default:
		DMERR("(%s): Unsupported BIO operation 0x%x",
		      dmz_metadata_label(dmz->metadata), bio_op(bio));
		ret = -EIO;
	}

	/*
	 * Release the chunk mapping. This will check that the mapping
	 * is still valid, that is, that the zone used still has valid blocks.
	 */
	if (zone)
		dmz_put_chunk_mapping(zmd, zone);
out:
	dmz_bio_endio(bio, errno_to_blk_status(ret));

	dmz_unlock_metadata(zmd);
}
/*
 * Increment a chunk work reference counter.
 */
static inline void dmz_get_chunk_work(struct dm_chunk_work *cw)
{
	refcount_inc(&cw->refcount);
}

/*
 * Decrement a chunk work reference count and
 * free it if it becomes 0.
 */
static void dmz_put_chunk_work(struct dm_chunk_work *cw)
{
	if (refcount_dec_and_test(&cw->refcount)) {
		WARN_ON(!bio_list_empty(&cw->bio_list));
		radix_tree_delete(&cw->target->chunk_rxtree, cw->chunk);
		kfree(cw);
	}
}
/*
 * Chunk BIO work function.
 */
static void dmz_chunk_work(struct work_struct *work)
{
	struct dm_chunk_work *cw = container_of(work, struct dm_chunk_work, work);
	struct dmz_target *dmz = cw->target;
	struct bio *bio;

	mutex_lock(&dmz->chunk_lock);

	/* Process the chunk BIOs */
	while ((bio = bio_list_pop(&cw->bio_list))) {
		mutex_unlock(&dmz->chunk_lock);
		dmz_handle_bio(dmz, cw, bio);
		mutex_lock(&dmz->chunk_lock);
		dmz_put_chunk_work(cw);
	}

	/* Queueing the work incremented the work refcount */
	dmz_put_chunk_work(cw);

	mutex_unlock(&dmz->chunk_lock);
}
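
/*
 * chunk_lock is deliberately dropped around dmz_handle_bio() so that
 * dmz_queue_chunk_work() can keep adding BIOs to cw->bio_list while one is
 * being processed; the per-BIO reference taken when queueing keeps cw alive
 * across the unlock/relock window.
 */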
/*
 * Flush work.
 */
static void dmz_flush_work(struct work_struct *work)
{
	struct dmz_target *dmz = container_of(work, struct dmz_target, flush_work.work);
	struct bio *bio;
	int ret;

	/* Flush dirty metadata blocks */
	ret = dmz_flush_metadata(dmz->metadata);
	if (ret)
		DMDEBUG("(%s): Metadata flush failed, rc=%d",
			dmz_metadata_label(dmz->metadata), ret);

	/* Process queued flush requests */
	while (1) {
		spin_lock(&dmz->flush_lock);
		bio = bio_list_pop(&dmz->flush_list);
		spin_unlock(&dmz->flush_lock);

		if (!bio)
			break;

		dmz_bio_endio(bio, errno_to_blk_status(ret));
	}

	queue_delayed_work(dmz->flush_wq, &dmz->flush_work, DMZ_FLUSH_PERIOD);
}
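
/*
 * Every queued flush BIO is completed with the result of the metadata flush:
 * a flush only succeeds if all dirty metadata made it to stable storage. The
 * work then re-arms itself to run again after DMZ_FLUSH_PERIOD.
 */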
/*
 * Get a chunk work and start it to process a new BIO.
 * If the BIO chunk has no work yet, create one.
 */
static int dmz_queue_chunk_work(struct dmz_target *dmz, struct bio *bio)
{
	unsigned int chunk = dmz_bio_chunk(dmz->metadata, bio);
	struct dm_chunk_work *cw;
	int ret = 0;

	mutex_lock(&dmz->chunk_lock);

	/* Get the BIO chunk work. If one is not active yet, create one */
	cw = radix_tree_lookup(&dmz->chunk_rxtree, chunk);
	if (cw) {
		dmz_get_chunk_work(cw);
	} else {
		/* Create a new chunk work */
		cw = kmalloc(sizeof(struct dm_chunk_work), GFP_NOIO);
		if (unlikely(!cw)) {
			ret = -ENOMEM;
			goto out;
		}

		INIT_WORK(&cw->work, dmz_chunk_work);
		refcount_set(&cw->refcount, 1);
		cw->target = dmz;
		cw->chunk = chunk;
		bio_list_init(&cw->bio_list);

		ret = radix_tree_insert(&dmz->chunk_rxtree, chunk, cw);
		if (unlikely(ret)) {
			kfree(cw);
			goto out;
		}
	}

	bio_list_add(&cw->bio_list, bio);

	if (queue_work(dmz->chunk_wq, &cw->work))
		dmz_get_chunk_work(cw);
out:
	mutex_unlock(&dmz->chunk_lock);
	return ret;
}
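
/*
 * The radix tree is keyed by chunk number, so all BIOs targeting the same
 * chunk funnel into a single dm_chunk_work and are processed in submission
 * order, which serializes accesses to a chunk's data and buffer zones.
 */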
/*
 * Check if the backing device is being removed. If it's on the way out,
 * start failing I/O. Reclaim and metadata components also call this
 * function to cleanly abort operation in the event of such failure.
 */
bool dmz_bdev_is_dying(struct dmz_dev *dmz_dev)
{
	if (dmz_dev->flags & DMZ_BDEV_DYING)
		return true;

	if (dmz_dev->flags & DMZ_CHECK_BDEV)
		return !dmz_check_bdev(dmz_dev);

	if (blk_queue_dying(bdev_get_queue(dmz_dev->bdev))) {
		dmz_dev_warn(dmz_dev, "Backing device queue dying");
		dmz_dev->flags |= DMZ_BDEV_DYING;
	}

	return dmz_dev->flags & DMZ_BDEV_DYING;
}
/*
 * Check the backing device availability. This detects such events as
 * backing device going offline due to errors, media removals, etc.
 * This check is less efficient than dmz_bdev_is_dying() and should
 * only be performed as a part of error handling.
 */
bool dmz_check_bdev(struct dmz_dev *dmz_dev)
{
	struct gendisk *disk;

	dmz_dev->flags &= ~DMZ_CHECK_BDEV;

	if (dmz_bdev_is_dying(dmz_dev))
		return false;

	disk = dmz_dev->bdev->bd_disk;
	if (disk->fops->check_events &&
	    disk->fops->check_events(disk, 0) & DISK_EVENT_MEDIA_CHANGE) {
		dmz_dev_warn(dmz_dev, "Backing device offline");
		dmz_dev->flags |= DMZ_BDEV_DYING;
	}

	return !(dmz_dev->flags & DMZ_BDEV_DYING);
}
/*
 * Process a new BIO.
 */
static int dmz_map(struct dm_target *ti, struct bio *bio)
{
	struct dmz_target *dmz = ti->private;
	struct dmz_metadata *zmd = dmz->metadata;
	struct dmz_bioctx *bioctx = dm_per_bio_data(bio, sizeof(struct dmz_bioctx));
	sector_t sector = bio->bi_iter.bi_sector;
	unsigned int nr_sectors = bio_sectors(bio);
	sector_t chunk_sector;
	int ret;

	if (dmz_dev_is_dying(zmd))
		return DM_MAPIO_KILL;

	DMDEBUG("(%s): BIO op %d sector %llu + %u => chunk %llu, block %llu, %u blocks",
		dmz_metadata_label(zmd),
		bio_op(bio), (unsigned long long)sector, nr_sectors,
		(unsigned long long)dmz_bio_chunk(zmd, bio),
		(unsigned long long)dmz_chunk_block(zmd, dmz_bio_block(bio)),
		(unsigned int)dmz_bio_blocks(bio));

	if (!nr_sectors && bio_op(bio) != REQ_OP_WRITE)
		return DM_MAPIO_REMAPPED;

	/* The BIO should be block aligned */
	if ((nr_sectors & DMZ_BLOCK_SECTORS_MASK) || (sector & DMZ_BLOCK_SECTORS_MASK))
		return DM_MAPIO_KILL;

	/* Initialize the BIO context */
	bioctx->dev = NULL;
	bioctx->zone = NULL;
	bioctx->bio = bio;
	refcount_set(&bioctx->ref, 1);

	/* Set the BIO pending in the flush list */
	if (!nr_sectors && bio_op(bio) == REQ_OP_WRITE) {
		spin_lock(&dmz->flush_lock);
		bio_list_add(&dmz->flush_list, bio);
		spin_unlock(&dmz->flush_lock);
		mod_delayed_work(dmz->flush_wq, &dmz->flush_work, 0);
		return DM_MAPIO_SUBMITTED;
	}

	/* Split zone BIOs to fit entirely into a zone */
	chunk_sector = sector & (dmz_zone_nr_sectors(zmd) - 1);
	if (chunk_sector + nr_sectors > dmz_zone_nr_sectors(zmd))
		dm_accept_partial_bio(bio, dmz_zone_nr_sectors(zmd) - chunk_sector);

	/* Now ready to handle this BIO */
	ret = dmz_queue_chunk_work(dmz, bio);
	if (ret) {
		DMDEBUG("(%s): BIO op %d, can't process chunk %llu, err %i",
			dmz_metadata_label(zmd),
			bio_op(bio), (u64)dmz_bio_chunk(zmd, bio),
			ret);
		return DM_MAPIO_REQUEUE;
	}

	return DM_MAPIO_SUBMITTED;
}
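
/*
 * dmz_map() never performs I/O itself: flush BIOs are parked on flush_list
 * for dmz_flush_work(), and everything else is handed to the per-chunk work,
 * hence the DM_MAPIO_SUBMITTED return on the queueing paths.
 */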
/*
 * Get zoned device information.
 */
static int dmz_get_zoned_device(struct dm_target *ti, char *path,
				int idx, int nr_devs)
{
	struct dmz_target *dmz = ti->private;
	struct dm_dev *ddev;
	struct dmz_dev *dev;
	int ret;
	struct block_device *bdev;

	/* Get the target device */
	ret = dm_get_device(ti, path, dm_table_get_mode(ti->table), &ddev);
	if (ret) {
		ti->error = "Get target device failed";
		return ret;
	}

	bdev = ddev->bdev;
	if (bdev_zoned_model(bdev) == BLK_ZONED_NONE) {
		if (nr_devs == 1) {
			ti->error = "Invalid regular device";
			goto err;
		}
		if (idx != 0) {
			ti->error = "First device must be a regular device";
			goto err;
		}
		if (dmz->ddev[0]) {
			ti->error = "Too many regular devices";
			goto err;
		}
		dev = &dmz->dev[idx];
		dev->flags = DMZ_BDEV_REGULAR;
	} else {
		if (dmz->ddev[idx]) {
			ti->error = "Too many zoned devices";
			goto err;
		}
		if (nr_devs > 1 && idx == 0) {
			ti->error = "First device must be a regular device";
			goto err;
		}
		dev = &dmz->dev[idx];
	}
	dev->bdev = bdev;
	dev->dev_idx = idx;

	dev->capacity = bdev_nr_sectors(bdev);
	if (ti->begin) {
		ti->error = "Partial mapping is not supported";
		goto err;
	}

	dmz->ddev[idx] = ddev;

	return 0;
err:
	dm_put_device(ti, ddev);
	return -EINVAL;
}
/*
 * Cleanup zoned device information.
 */
static void dmz_put_zoned_devices(struct dm_target *ti)
{
	struct dmz_target *dmz = ti->private;
	int i;

	for (i = 0; i < dmz->nr_ddevs; i++)
		if (dmz->ddev[i])
			dm_put_device(ti, dmz->ddev[i]);

	kfree(dmz->ddev);
}
static int dmz_fixup_devices(struct dm_target *ti)
{
	struct dmz_target *dmz = ti->private;
	struct dmz_dev *reg_dev = NULL;
	sector_t zone_nr_sectors = 0;
	int i;

	/*
	 * When we have more than one device, the first one must be a
	 * regular block device and the others zoned block devices.
	 */
	if (dmz->nr_ddevs > 1) {
		reg_dev = &dmz->dev[0];
		if (!(reg_dev->flags & DMZ_BDEV_REGULAR)) {
			ti->error = "Primary disk is not a regular device";
			return -EINVAL;
		}
		for (i = 1; i < dmz->nr_ddevs; i++) {
			struct dmz_dev *zoned_dev = &dmz->dev[i];
			struct block_device *bdev = zoned_dev->bdev;

			if (zoned_dev->flags & DMZ_BDEV_REGULAR) {
				ti->error = "Secondary disk is not a zoned device";
				return -EINVAL;
			}
			if (zone_nr_sectors &&
			    zone_nr_sectors != bdev_zone_sectors(bdev)) {
				ti->error = "Zone nr sectors mismatch";
				return -EINVAL;
			}
			zone_nr_sectors = bdev_zone_sectors(bdev);
			zoned_dev->zone_nr_sectors = zone_nr_sectors;
			zoned_dev->nr_zones = bdev_nr_zones(bdev);
		}
	} else {
		struct dmz_dev *zoned_dev = &dmz->dev[0];
		struct block_device *bdev = zoned_dev->bdev;

		if (zoned_dev->flags & DMZ_BDEV_REGULAR) {
			ti->error = "Disk is not a zoned device";
			return -EINVAL;
		}
		zoned_dev->zone_nr_sectors = bdev_zone_sectors(bdev);
		zoned_dev->nr_zones = bdev_nr_zones(bdev);
	}

	if (reg_dev) {
		sector_t zone_offset;

		reg_dev->zone_nr_sectors = zone_nr_sectors;
		reg_dev->nr_zones =
			DIV_ROUND_UP_SECTOR_T(reg_dev->capacity,
					      reg_dev->zone_nr_sectors);
		reg_dev->zone_offset = 0;
		zone_offset = reg_dev->nr_zones;
		for (i = 1; i < dmz->nr_ddevs; i++) {
			dmz->dev[i].zone_offset = zone_offset;
			zone_offset += dmz->dev[i].nr_zones;
		}
	}
	return 0;
}
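
/*
 * Example (illustrative): with 256 MB zones, a 10 GB regular device and two
 * zoned devices of 1000 zones each, the regular device is carved into 40
 * emulated zones at offset 0, the first zoned device covers zones 40-1039,
 * and the second covers zones 1040-2039.
 */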
/*
 * Setup target.
 */
static int dmz_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	struct dmz_target *dmz;
	int ret, i;

	/* Check arguments */
	if (argc < 1) {
		ti->error = "Invalid argument count";
		return -EINVAL;
	}

	/* Allocate and initialize the target descriptor */
	dmz = kzalloc(sizeof(struct dmz_target), GFP_KERNEL);
	if (!dmz) {
		ti->error = "Unable to allocate the zoned target descriptor";
		return -ENOMEM;
	}
	dmz->dev = kcalloc(argc, sizeof(struct dmz_dev), GFP_KERNEL);
	if (!dmz->dev) {
		ti->error = "Unable to allocate the zoned device descriptors";
		kfree(dmz);
		return -ENOMEM;
	}
	dmz->ddev = kcalloc(argc, sizeof(struct dm_dev *), GFP_KERNEL);
	if (!dmz->ddev) {
		ti->error = "Unable to allocate the dm device descriptors";
		ret = -ENOMEM;
		goto err;
	}
	dmz->nr_ddevs = argc;

	ti->private = dmz;

	/* Get the target zoned block device */
	for (i = 0; i < argc; i++) {
		ret = dmz_get_zoned_device(ti, argv[i], i, argc);
		if (ret)
			goto err_dev;
	}
	ret = dmz_fixup_devices(ti);
	if (ret)
		goto err_dev;

	/* Initialize metadata */
	ret = dmz_ctr_metadata(dmz->dev, argc, &dmz->metadata,
			       dm_table_device_name(ti->table));
	if (ret) {
		ti->error = "Metadata initialization failed";
		goto err_dev;
	}

	/* Set target (no write same support) */
	ti->max_io_len = dmz_zone_nr_sectors(dmz->metadata);
	ti->num_flush_bios = 1;
	ti->num_discard_bios = 1;
	ti->num_write_zeroes_bios = 1;
	ti->per_io_data_size = sizeof(struct dmz_bioctx);
	ti->flush_supported = true;
	ti->discards_supported = true;

	/* The exposed capacity is the number of chunks that can be mapped */
	ti->len = (sector_t)dmz_nr_chunks(dmz->metadata) <<
		dmz_zone_nr_sectors_shift(dmz->metadata);

	/* Zone BIO */
	ret = bioset_init(&dmz->bio_set, DMZ_MIN_BIOS, 0, 0);
	if (ret) {
		ti->error = "Create BIO set failed";
		goto err_meta;
	}

	/* Chunk BIO work */
	mutex_init(&dmz->chunk_lock);
	INIT_RADIX_TREE(&dmz->chunk_rxtree, GFP_NOIO);
	dmz->chunk_wq = alloc_workqueue("dmz_cwq_%s",
					WQ_MEM_RECLAIM | WQ_UNBOUND, 0,
					dmz_metadata_label(dmz->metadata));
	if (!dmz->chunk_wq) {
		ti->error = "Create chunk workqueue failed";
		ret = -ENOMEM;
		goto err_bio;
	}

	/* Flush work */
	spin_lock_init(&dmz->flush_lock);
	bio_list_init(&dmz->flush_list);
	INIT_DELAYED_WORK(&dmz->flush_work, dmz_flush_work);
	dmz->flush_wq = alloc_ordered_workqueue("dmz_fwq_%s", WQ_MEM_RECLAIM,
						dmz_metadata_label(dmz->metadata));
	if (!dmz->flush_wq) {
		ti->error = "Create flush workqueue failed";
		ret = -ENOMEM;
		goto err_cwq;
	}
	mod_delayed_work(dmz->flush_wq, &dmz->flush_work, DMZ_FLUSH_PERIOD);

	/* Initialize reclaim */
	for (i = 0; i < dmz->nr_ddevs; i++) {
		ret = dmz_ctr_reclaim(dmz->metadata, &dmz->dev[i].reclaim, i);
		if (ret) {
			ti->error = "Zone reclaim initialization failed";
			goto err_fwq;
		}
	}

	DMINFO("(%s): Target device: %llu 512-byte logical sectors (%llu blocks)",
	       dmz_metadata_label(dmz->metadata),
	       (unsigned long long)ti->len,
	       (unsigned long long)dmz_sect2blk(ti->len));

	return 0;
err_fwq:
	destroy_workqueue(dmz->flush_wq);
err_cwq:
	destroy_workqueue(dmz->chunk_wq);
err_bio:
	mutex_destroy(&dmz->chunk_lock);
	bioset_exit(&dmz->bio_set);
err_meta:
	dmz_dtr_metadata(dmz->metadata);
err_dev:
	dmz_put_zoned_devices(ti);
err:
	kfree(dmz->dev);
	kfree(dmz);

	return ret;
}
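
/*
 * Illustrative usage (not part of this file): assuming /dev/sdb is a
 * host-managed zoned disk already formatted with dmzadm, a target could be
 * created with something like:
 *
 *   echo "0 `blockdev --getsz /dev/sdb` zoned /dev/sdb" | dmsetup create dmz-sdb
 *
 * Multiple devices are given as additional table arguments, with the regular
 * (cache) device listed first.
 */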
/*
 * Cleanup target.
 */
static void dmz_dtr(struct dm_target *ti)
{
	struct dmz_target *dmz = ti->private;
	int i;

	destroy_workqueue(dmz->chunk_wq);

	for (i = 0; i < dmz->nr_ddevs; i++)
		dmz_dtr_reclaim(dmz->dev[i].reclaim);

	cancel_delayed_work_sync(&dmz->flush_work);
	destroy_workqueue(dmz->flush_wq);

	(void) dmz_flush_metadata(dmz->metadata);

	dmz_dtr_metadata(dmz->metadata);

	bioset_exit(&dmz->bio_set);

	dmz_put_zoned_devices(ti);

	mutex_destroy(&dmz->chunk_lock);

	kfree(dmz->dev);
	kfree(dmz);
}
/*
 * Setup target request queue limits.
 */
static void dmz_io_hints(struct dm_target *ti, struct queue_limits *limits)
{
	struct dmz_target *dmz = ti->private;
	unsigned int chunk_sectors = dmz_zone_nr_sectors(dmz->metadata);

	limits->logical_block_size = DMZ_BLOCK_SIZE;
	limits->physical_block_size = DMZ_BLOCK_SIZE;

	blk_limits_io_min(limits, DMZ_BLOCK_SIZE);
	blk_limits_io_opt(limits, DMZ_BLOCK_SIZE);

	limits->discard_alignment = 0;
	limits->discard_granularity = DMZ_BLOCK_SIZE;
	limits->max_discard_sectors = chunk_sectors;
	limits->max_hw_discard_sectors = chunk_sectors;
	limits->max_write_zeroes_sectors = chunk_sectors;

	/* FS hint to try to align to the device zone size */
	limits->chunk_sectors = chunk_sectors;
	limits->max_sectors = chunk_sectors;

	/* We are exposing a drive-managed zoned block device */
	limits->zoned = BLK_ZONED_NONE;
}
/*
 * Pass on ioctl to the backend device.
 */
static int dmz_prepare_ioctl(struct dm_target *ti, struct block_device **bdev)
{
	struct dmz_target *dmz = ti->private;
	struct dmz_dev *dev = &dmz->dev[0];

	if (!dmz_check_bdev(dev))
		return -EIO;

	*bdev = dev->bdev;
	return 0;
}
/*
 * Stop works on suspend.
 */
static void dmz_suspend(struct dm_target *ti)
{
	struct dmz_target *dmz = ti->private;
	int i;

	flush_workqueue(dmz->chunk_wq);
	for (i = 0; i < dmz->nr_ddevs; i++)
		dmz_suspend_reclaim(dmz->dev[i].reclaim);
	cancel_delayed_work_sync(&dmz->flush_work);
}
/*
 * Restart works on resume or if suspend failed.
 */
static void dmz_resume(struct dm_target *ti)
{
	struct dmz_target *dmz = ti->private;
	int i;

	queue_delayed_work(dmz->flush_wq, &dmz->flush_work, DMZ_FLUSH_PERIOD);
	for (i = 0; i < dmz->nr_ddevs; i++)
		dmz_resume_reclaim(dmz->dev[i].reclaim);
}
static int dmz_iterate_devices(struct dm_target *ti,
			       iterate_devices_callout_fn fn, void *data)
{
	struct dmz_target *dmz = ti->private;
	unsigned int zone_nr_sectors = dmz_zone_nr_sectors(dmz->metadata);
	sector_t capacity;
	int i, r;

	for (i = 0; i < dmz->nr_ddevs; i++) {
		capacity = dmz->dev[i].capacity & ~(zone_nr_sectors - 1);
		r = fn(ti, dmz->ddev[i], 0, capacity, data);
		if (r)
			break;
	}
	return r;
}
static void dmz_status(struct dm_target *ti, status_type_t type,
		       unsigned int status_flags, char *result,
		       unsigned int maxlen)
{
	struct dmz_target *dmz = ti->private;
	ssize_t sz = 0;
	char buf[BDEVNAME_SIZE];
	struct dmz_dev *dev;
	int i;

	switch (type) {
	case STATUSTYPE_INFO:
		DMEMIT("%u zones %u/%u cache",
		       dmz_nr_zones(dmz->metadata),
		       dmz_nr_unmap_cache_zones(dmz->metadata),
		       dmz_nr_cache_zones(dmz->metadata));
		for (i = 0; i < dmz->nr_ddevs; i++) {
			/*
			 * For a multi-device setup the first device
			 * contains only cache zones.
			 */
			if ((i == 0) &&
			    (dmz_nr_cache_zones(dmz->metadata) > 0))
				continue;
			DMEMIT(" %u/%u random %u/%u sequential",
			       dmz_nr_unmap_rnd_zones(dmz->metadata, i),
			       dmz_nr_rnd_zones(dmz->metadata, i),
			       dmz_nr_unmap_seq_zones(dmz->metadata, i),
			       dmz_nr_seq_zones(dmz->metadata, i));
		}
		break;
	case STATUSTYPE_TABLE:
		dev = &dmz->dev[0];
		format_dev_t(buf, dev->bdev->bd_dev);
		DMEMIT("%s", buf);
		for (i = 1; i < dmz->nr_ddevs; i++) {
			dev = &dmz->dev[i];
			format_dev_t(buf, dev->bdev->bd_dev);
			DMEMIT(" %s", buf);
		}
		break;
	case STATUSTYPE_IMA:
		*result = '\0';
		break;
	}
}
static int dmz_message(struct dm_target *ti, unsigned int argc, char **argv,
		       char *result, unsigned int maxlen)
{
	struct dmz_target *dmz = ti->private;
	int r = -EINVAL;

	if (!strcasecmp(argv[0], "reclaim")) {
		int i;

		for (i = 0; i < dmz->nr_ddevs; i++)
			dmz_schedule_reclaim(dmz->dev[i].reclaim);
		r = 0;
	} else {
		DMERR("unrecognized message %s", argv[0]);
	}
	return r;
}
static struct target_type zoned_target = {
	.name		 = "zoned",
	.version	 = {2, 0, 0},
	.features	 = DM_TARGET_SINGLETON | DM_TARGET_MIXED_ZONED_MODEL,
	.module		 = THIS_MODULE,
	.ctr		 = dmz_ctr,
	.dtr		 = dmz_dtr,
	.map		 = dmz_map,
	.io_hints	 = dmz_io_hints,
	.prepare_ioctl	 = dmz_prepare_ioctl,
	.postsuspend	 = dmz_suspend,
	.resume		 = dmz_resume,
	.iterate_devices = dmz_iterate_devices,
	.status		 = dmz_status,
	.message	 = dmz_message,
};
module_dm(zoned);
1158 MODULE_DESCRIPTION(DM_NAME " target for zoned block devices");
1159 MODULE_AUTHOR("Damien Le Moal <damien.lemoal@wdc.com>");
1160 MODULE_LICENSE("GPL");