2 * Copyright (C) 2001, 2002 Sistina Software (UK) Limited.
3 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
5 * This file is released under the GPL.
11 #include <linux/init.h>
12 #include <linux/module.h>
13 #include <linux/mutex.h>
14 #include <linux/moduleparam.h>
15 #include <linux/blkpg.h>
16 #include <linux/bio.h>
17 #include <linux/mempool.h>
18 #include <linux/slab.h>
19 #include <linux/idr.h>
20 #include <linux/hdreg.h>
21 #include <linux/delay.h>
23 #include <trace/events/block.h>
25 #define DM_MSG_PREFIX "core"
29 * ratelimit state to be used in DMXXX_LIMIT().
31 DEFINE_RATELIMIT_STATE(dm_ratelimit_state,
32 DEFAULT_RATELIMIT_INTERVAL,
33 DEFAULT_RATELIMIT_BURST);
34 EXPORT_SYMBOL(dm_ratelimit_state);
38 * Cookies are numeric values sent with CHANGE and REMOVE
39 * uevents while resuming, removing or renaming the device.
41 #define DM_COOKIE_ENV_VAR_NAME "DM_COOKIE"
42 #define DM_COOKIE_LENGTH 24
44 static const char *_name = DM_NAME;
46 static unsigned int major = 0;
47 static unsigned int _major = 0;
49 static DEFINE_IDR(_minor_idr);
51 static DEFINE_SPINLOCK(_minor_lock);
53 static void do_deferred_remove(struct work_struct *w);
55 static DECLARE_WORK(deferred_remove_work, do_deferred_remove);
57 static struct workqueue_struct *deferred_remove_workqueue;
61 * One of these is allocated per bio.
64 struct mapped_device *md;
68 unsigned long start_time;
69 spinlock_t endio_lock;
70 struct dm_stats_aux stats_aux;
74 * For request-based dm.
75 * One of these is allocated per request.
77 struct dm_rq_target_io {
78 struct mapped_device *md;
80 struct request *orig, clone;
* For request-based dm - the bio clones we allocate are embedded in these
* structs.
* We allocate these with bio_alloc_bioset, using the front_pad parameter when
* the bioset is created - this means the bio has to come at the end of the
* struct.
93 struct dm_rq_clone_bio_info {
95 struct dm_rq_target_io *tio;
99 union map_info *dm_get_mapinfo(struct bio *bio)
101 if (bio && bio->bi_private)
102 return &((struct dm_target_io *)bio->bi_private)->info;
106 union map_info *dm_get_rq_mapinfo(struct request *rq)
108 if (rq && rq->end_io_data)
109 return &((struct dm_rq_target_io *)rq->end_io_data)->info;
112 EXPORT_SYMBOL_GPL(dm_get_rq_mapinfo);
114 #define MINOR_ALLOCED ((void *)-1)
117 * Bits for the md->flags field.
119 #define DMF_BLOCK_IO_FOR_SUSPEND 0
120 #define DMF_SUSPENDED 1
122 #define DMF_FREEING 3
123 #define DMF_DELETING 4
124 #define DMF_NOFLUSH_SUSPENDING 5
125 #define DMF_MERGE_IS_OPTIONAL 6
126 #define DMF_DEFERRED_REMOVE 7
129 * A dummy definition to make RCU happy.
130 * struct dm_table should never be dereferenced in this file.
137 * Work processed by per-device workqueue.
139 struct mapped_device {
140 struct srcu_struct io_barrier;
141 struct mutex suspend_lock;
146 * The current mapping.
147 * Use dm_get_live_table{_fast} or take suspend_lock for
150 struct dm_table *map;
154 struct request_queue *queue;
156 /* Protect queue and type against concurrent access. */
157 struct mutex type_lock;
159 struct target_type *immutable_target_type;
161 struct gendisk *disk;
167 * A list of ios that arrived while we were suspended.
170 wait_queue_head_t wait;
171 struct work_struct work;
172 struct bio_list deferred;
173 spinlock_t deferred_lock;
176 * Processing queue (flush)
178 struct workqueue_struct *wq;
181 * io objects are allocated from here.
191 wait_queue_head_t eventq;
193 struct list_head uevent_list;
194 spinlock_t uevent_lock; /* Protect access to uevent_list */
* freeze/thaw support requires holding onto a super block
199 struct super_block *frozen_sb;
200 struct block_device *bdev;
202 /* forced geometry settings */
203 struct hd_geometry geometry;
205 /* kobject and completion */
206 struct dm_kobject_holder kobj_holder;
208 /* zero-length flush that will be cloned and submitted to targets */
209 struct bio flush_bio;
211 struct dm_stats stats;
* For mempool pre-allocation at table load time.
217 struct dm_md_mempools {
222 #define RESERVED_BIO_BASED_IOS 16
223 #define RESERVED_REQUEST_BASED_IOS 256
224 #define RESERVED_MAX_IOS 1024
225 static struct kmem_cache *_io_cache;
226 static struct kmem_cache *_rq_tio_cache;
229 * Bio-based DM's mempools' reserved IOs set by the user.
231 static unsigned reserved_bio_based_ios = RESERVED_BIO_BASED_IOS;
234 * Request-based DM's mempools' reserved IOs set by the user.
236 static unsigned reserved_rq_based_ios = RESERVED_REQUEST_BASED_IOS;
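/*
 * Normalise a reserved_*_ios module parameter: a value of 0 falls back to
 * 'def' and values above 'max' are clamped, with any correction written
 * back via cmpxchg so later readers see the adjusted value.
 */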
238 static unsigned __dm_get_reserved_ios(unsigned *reserved_ios,
239 unsigned def, unsigned max)
241 unsigned ios = ACCESS_ONCE(*reserved_ios);
242 unsigned modified_ios = 0;
250 (void)cmpxchg(reserved_ios, ios, modified_ios);
257 unsigned dm_get_reserved_bio_based_ios(void)
259 return __dm_get_reserved_ios(&reserved_bio_based_ios,
260 RESERVED_BIO_BASED_IOS, RESERVED_MAX_IOS);
262 EXPORT_SYMBOL_GPL(dm_get_reserved_bio_based_ios);
264 unsigned dm_get_reserved_rq_based_ios(void)
266 return __dm_get_reserved_ios(&reserved_rq_based_ios,
267 RESERVED_REQUEST_BASED_IOS, RESERVED_MAX_IOS);
269 EXPORT_SYMBOL_GPL(dm_get_reserved_rq_based_ios);
271 static int __init local_init(void)
275 /* allocate a slab for the dm_ios */
276 _io_cache = KMEM_CACHE(dm_io, 0);
280 _rq_tio_cache = KMEM_CACHE(dm_rq_target_io, 0);
282 goto out_free_io_cache;
284 r = dm_uevent_init();
286 goto out_free_rq_tio_cache;
288 deferred_remove_workqueue = alloc_workqueue("kdmremove", WQ_UNBOUND, 1);
289 if (!deferred_remove_workqueue) {
291 goto out_uevent_exit;
295 r = register_blkdev(_major, _name);
297 goto out_free_workqueue;
305 destroy_workqueue(deferred_remove_workqueue);
308 out_free_rq_tio_cache:
309 kmem_cache_destroy(_rq_tio_cache);
311 kmem_cache_destroy(_io_cache);
316 static void local_exit(void)
318 flush_scheduled_work();
319 destroy_workqueue(deferred_remove_workqueue);
321 kmem_cache_destroy(_rq_tio_cache);
322 kmem_cache_destroy(_io_cache);
323 unregister_blkdev(_major, _name);
328 DMINFO("cleaned up");
331 static int (*_inits[])(void) __initdata = {
342 static void (*_exits[])(void) = {
353 static int __init dm_init(void)
355 const int count = ARRAY_SIZE(_inits);
359 for (i = 0; i < count; i++) {
374 static void __exit dm_exit(void)
376 int i = ARRAY_SIZE(_exits);
382 * Should be empty by this point.
384 idr_destroy(&_minor_idr);
388 * Block device functions
390 int dm_deleting_md(struct mapped_device *md)
392 return test_bit(DMF_DELETING, &md->flags);
395 static int dm_blk_open(struct block_device *bdev, fmode_t mode)
397 struct mapped_device *md;
399 spin_lock(&_minor_lock);
401 md = bdev->bd_disk->private_data;
405 if (test_bit(DMF_FREEING, &md->flags) ||
406 dm_deleting_md(md)) {
412 atomic_inc(&md->open_count);
415 spin_unlock(&_minor_lock);
417 return md ? 0 : -ENXIO;
420 static void dm_blk_close(struct gendisk *disk, fmode_t mode)
422 struct mapped_device *md = disk->private_data;
424 spin_lock(&_minor_lock);
426 if (atomic_dec_and_test(&md->open_count) &&
427 (test_bit(DMF_DEFERRED_REMOVE, &md->flags)))
428 queue_work(deferred_remove_workqueue, &deferred_remove_work);
432 spin_unlock(&_minor_lock);
435 int dm_open_count(struct mapped_device *md)
437 return atomic_read(&md->open_count);
441 * Guarantees nothing is using the device before it's deleted.
443 int dm_lock_for_deletion(struct mapped_device *md, bool mark_deferred, bool only_deferred)
447 spin_lock(&_minor_lock);
449 if (dm_open_count(md)) {
452 set_bit(DMF_DEFERRED_REMOVE, &md->flags);
453 } else if (only_deferred && !test_bit(DMF_DEFERRED_REMOVE, &md->flags))
456 set_bit(DMF_DELETING, &md->flags);
458 spin_unlock(&_minor_lock);
463 int dm_cancel_deferred_remove(struct mapped_device *md)
467 spin_lock(&_minor_lock);
469 if (test_bit(DMF_DELETING, &md->flags))
472 clear_bit(DMF_DEFERRED_REMOVE, &md->flags);
474 spin_unlock(&_minor_lock);
479 static void do_deferred_remove(struct work_struct *w)
481 dm_deferred_remove();
484 sector_t dm_get_size(struct mapped_device *md)
486 return get_capacity(md->disk);
489 struct dm_stats *dm_get_stats(struct mapped_device *md)
494 static int dm_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
496 struct mapped_device *md = bdev->bd_disk->private_data;
498 return dm_get_geometry(md, geo);
501 static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode,
502 unsigned int cmd, unsigned long arg)
504 struct mapped_device *md = bdev->bd_disk->private_data;
506 struct dm_table *map;
507 struct dm_target *tgt;
511 map = dm_get_live_table(md, &srcu_idx);
513 if (!map || !dm_table_get_size(map))
516 /* We only support devices that have a single target */
517 if (dm_table_get_num_targets(map) != 1)
520 tgt = dm_table_get_target(map, 0);
522 if (dm_suspended_md(md)) {
527 if (tgt->type->ioctl)
528 r = tgt->type->ioctl(tgt, cmd, arg);
531 dm_put_live_table(md, srcu_idx);
533 if (r == -ENOTCONN) {
541 static struct dm_io *alloc_io(struct mapped_device *md)
543 return mempool_alloc(md->io_pool, GFP_NOIO);
546 static void free_io(struct mapped_device *md, struct dm_io *io)
548 mempool_free(io, md->io_pool);
551 static void free_tio(struct mapped_device *md, struct dm_target_io *tio)
553 bio_put(&tio->clone);
556 static struct dm_rq_target_io *alloc_rq_tio(struct mapped_device *md,
559 return mempool_alloc(md->io_pool, gfp_mask);
562 static void free_rq_tio(struct dm_rq_target_io *tio)
564 mempool_free(tio, tio->md->io_pool);
567 static int md_in_flight(struct mapped_device *md)
569 return atomic_read(&md->pending[READ]) +
570 atomic_read(&md->pending[WRITE]);
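/*
 * Account the start of an io: record the start time, bump the per-device
 * in-flight counters and, when dm-stats is in use, the per-region statistics.
 */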
573 static void start_io_acct(struct dm_io *io)
575 struct mapped_device *md = io->md;
576 struct bio *bio = io->bio;
578 int rw = bio_data_dir(bio);
580 io->start_time = jiffies;
582 cpu = part_stat_lock();
583 part_round_stats(cpu, &dm_disk(md)->part0);
585 atomic_set(&dm_disk(md)->part0.in_flight[rw],
586 atomic_inc_return(&md->pending[rw]));
588 if (unlikely(dm_stats_used(&md->stats)))
589 dm_stats_account_io(&md->stats, bio->bi_rw, bio->bi_iter.bi_sector,
590 bio_sectors(bio), false, 0, &io->stats_aux);
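/*
 * Account the completion of an io: update the disk statistics, drop the
 * in-flight count and nudge anyone waiting in dm_wait_for_completion()
 * once nothing remains in flight.
 */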
593 static void end_io_acct(struct dm_io *io)
595 struct mapped_device *md = io->md;
596 struct bio *bio = io->bio;
597 unsigned long duration = jiffies - io->start_time;
599 int rw = bio_data_dir(bio);
601 cpu = part_stat_lock();
602 part_round_stats(cpu, &dm_disk(md)->part0);
603 part_stat_add(cpu, &dm_disk(md)->part0, ticks[rw], duration);
606 if (unlikely(dm_stats_used(&md->stats)))
607 dm_stats_account_io(&md->stats, bio->bi_rw, bio->bi_iter.bi_sector,
608 bio_sectors(bio), true, duration, &io->stats_aux);
* After this is decremented the bio must not be touched if it is a flush.
614 pending = atomic_dec_return(&md->pending[rw]);
615 atomic_set(&dm_disk(md)->part0.in_flight[rw], pending);
616 pending += atomic_read(&md->pending[rw^0x1]);
618 /* nudge anyone waiting on suspend queue */
624 * Add the bio to the list of deferred io.
626 static void queue_io(struct mapped_device *md, struct bio *bio)
630 spin_lock_irqsave(&md->deferred_lock, flags);
631 bio_list_add(&md->deferred, bio);
632 spin_unlock_irqrestore(&md->deferred_lock, flags);
633 queue_work(md->wq, &md->work);
* Everyone (including functions in this file) should use this
* function to access the md->map field, and make sure they call
* dm_put_live_table() when finished.
641 struct dm_table *dm_get_live_table(struct mapped_device *md, int *srcu_idx) __acquires(md->io_barrier)
643 *srcu_idx = srcu_read_lock(&md->io_barrier);
645 return srcu_dereference(md->map, &md->io_barrier);
648 void dm_put_live_table(struct mapped_device *md, int srcu_idx) __releases(md->io_barrier)
650 srcu_read_unlock(&md->io_barrier, srcu_idx);
653 void dm_sync_table(struct mapped_device *md)
655 synchronize_srcu(&md->io_barrier);
656 synchronize_rcu_expedited();
660 * A fast alternative to dm_get_live_table/dm_put_live_table.
661 * The caller must not block between these two functions.
663 static struct dm_table *dm_get_live_table_fast(struct mapped_device *md) __acquires(RCU)
666 return rcu_dereference(md->map);
669 static void dm_put_live_table_fast(struct mapped_device *md) __releases(RCU)
675 * Get the geometry associated with a dm device
677 int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo)
685 * Set the geometry of a device.
687 int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo)
689 sector_t sz = (sector_t)geo->cylinders * geo->heads * geo->sectors;
691 if (geo->start > sz) {
692 DMWARN("Start sector is beyond the geometry limits.");
/*-----------------------------------------------------------------
 * A more elegant solution is in the works that uses the queue
 * merge fn; unfortunately there are a couple of changes to
 * the block layer that I want to make for this. So in the
 * interests of getting something for people to use I give
 * you this clearly demarcated crap.
 *---------------------------------------------------------------*/
710 static int __noflush_suspending(struct mapped_device *md)
712 return test_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
* Decrements the number of outstanding ios that a bio has been
* cloned into, completing the original io if necessary.
719 static void dec_pending(struct dm_io *io, int error)
724 struct mapped_device *md = io->md;
726 /* Push-back supersedes any I/O errors */
727 if (unlikely(error)) {
728 spin_lock_irqsave(&io->endio_lock, flags);
729 if (!(io->error > 0 && __noflush_suspending(md)))
731 spin_unlock_irqrestore(&io->endio_lock, flags);
734 if (atomic_dec_and_test(&io->io_count)) {
735 if (io->error == DM_ENDIO_REQUEUE) {
737 * Target requested pushing back the I/O.
739 spin_lock_irqsave(&md->deferred_lock, flags);
740 if (__noflush_suspending(md))
741 bio_list_add_head(&md->deferred, io->bio);
743 /* noflush suspend was interrupted. */
745 spin_unlock_irqrestore(&md->deferred_lock, flags);
748 io_error = io->error;
753 if (io_error == DM_ENDIO_REQUEUE)
756 if ((bio->bi_rw & REQ_FLUSH) && bio->bi_iter.bi_size) {
* Preflush done for flush with data; reissue without REQ_FLUSH.
761 bio->bi_rw &= ~REQ_FLUSH;
764 /* done with normal IO or empty flush */
765 trace_block_bio_complete(md->queue, bio, io_error);
766 bio_endio(bio, io_error);
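/*
 * bi_end_io for bios cloned by __map_bio(): give the target's optional
 * end_io hook a chance to override or requeue the result before the clone
 * is freed and the original io is completed via dec_pending().
 */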
771 static void clone_endio(struct bio *bio, int error)
774 struct dm_target_io *tio = bio->bi_private;
775 struct dm_io *io = tio->io;
776 struct mapped_device *md = tio->io->md;
777 dm_endio_fn endio = tio->ti->type->end_io;
779 if (!bio_flagged(bio, BIO_UPTODATE) && !error)
783 r = endio(tio->ti, bio, error);
784 if (r < 0 || r == DM_ENDIO_REQUEUE)
* error and requeue requests are handled in dec_pending().
790 else if (r == DM_ENDIO_INCOMPLETE)
791 /* The target will handle the io */
794 DMWARN("unimplemented target endio return value: %d", r);
800 dec_pending(io, error);
804 * Partial completion handling for request-based dm
806 static void end_clone_bio(struct bio *clone, int error)
808 struct dm_rq_clone_bio_info *info = clone->bi_private;
809 struct dm_rq_target_io *tio = info->tio;
810 struct bio *bio = info->orig;
811 unsigned int nr_bytes = info->orig->bi_iter.bi_size;
* An error has already been detected on the request.
* Once an error has occurred, just let clone->end_io() handle the completion.
* Don't report the error to the upper layer yet.
* The error-handling decision is made by the target driver
* when the request is completed.
* I/O for the bio successfully completed.
* Report the data completion to the upper layer.
* bios are processed from the head of the list.
* So the completing bio should always be rq->bio.
* If it's not, something is wrong.
842 if (tio->orig->bio != bio)
843 DMERR("bio completion is going in the middle of the request");
846 * Update the original request.
847 * Do not use blk_end_request() here, because it may complete
848 * the original request before the clone, and break the ordering.
850 blk_update_request(tio->orig, 0, nr_bytes);
854 * Don't touch any member of the md after calling this function because
855 * the md may be freed in dm_put() at the end of this function.
856 * Or do dm_get() before calling this function and dm_put() later.
858 static void rq_completed(struct mapped_device *md, int rw, int run_queue)
860 atomic_dec(&md->pending[rw]);
862 /* nudge anyone waiting on suspend queue */
863 if (!md_in_flight(md))
* Run this off this callpath, as drivers could invoke end_io while
* inside their request_fn (and holding the queue lock). Calling
* back into ->request_fn() could deadlock attempting to grab the
* queue lock again.
873 blk_run_queue_async(md->queue);
876 * dm_put() must be at the end of this function. See the comment above
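/*
 * Release a request clone: undo blk_rq_prep_clone() and return the tio
 * that carries the clone to its mempool.
 */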
881 static void free_rq_clone(struct request *clone)
883 struct dm_rq_target_io *tio = clone->end_io_data;
885 blk_rq_unprep_clone(clone);
890 * Complete the clone and the original request.
891 * Must be called without queue lock.
893 static void dm_end_request(struct request *clone, int error)
895 int rw = rq_data_dir(clone);
896 struct dm_rq_target_io *tio = clone->end_io_data;
897 struct mapped_device *md = tio->md;
898 struct request *rq = tio->orig;
900 if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
901 rq->errors = clone->errors;
902 rq->resid_len = clone->resid_len;
* We are using the sense buffer of the original request,
* so setting the length of the sense data is enough.
910 rq->sense_len = clone->sense_len;
913 free_rq_clone(clone);
914 blk_end_request_all(rq, error);
915 rq_completed(md, rw, true);
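/*
 * Undo dm_prep_fn(): clear REQ_DONTPREP and free the clone that was
 * attached to the original request.
 */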
918 static void dm_unprep_request(struct request *rq)
920 struct request *clone = rq->special;
923 rq->cmd_flags &= ~REQ_DONTPREP;
925 free_rq_clone(clone);
929 * Requeue the original request of a clone.
931 void dm_requeue_unmapped_request(struct request *clone)
933 int rw = rq_data_dir(clone);
934 struct dm_rq_target_io *tio = clone->end_io_data;
935 struct mapped_device *md = tio->md;
936 struct request *rq = tio->orig;
937 struct request_queue *q = rq->q;
940 dm_unprep_request(rq);
942 spin_lock_irqsave(q->queue_lock, flags);
943 blk_requeue_request(q, rq);
944 spin_unlock_irqrestore(q->queue_lock, flags);
946 rq_completed(md, rw, 0);
948 EXPORT_SYMBOL_GPL(dm_requeue_unmapped_request);
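/*
 * Helpers to stop and restart a request_queue under its queue_lock;
 * request-based dm stops the queue during suspend so no further requests
 * are dispatched to the targets.
 */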
950 static void __stop_queue(struct request_queue *q)
955 static void stop_queue(struct request_queue *q)
959 spin_lock_irqsave(q->queue_lock, flags);
961 spin_unlock_irqrestore(q->queue_lock, flags);
964 static void __start_queue(struct request_queue *q)
966 if (blk_queue_stopped(q))
970 static void start_queue(struct request_queue *q)
974 spin_lock_irqsave(q->queue_lock, flags);
976 spin_unlock_irqrestore(q->queue_lock, flags);
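/*
 * Per-clone completion for request-based dm: let the target's rq_end_io
 * hook inspect the result, then finish, requeue or leave the I/O to the
 * target depending on the code it returns.
 */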
979 static void dm_done(struct request *clone, int error, bool mapped)
982 struct dm_rq_target_io *tio = clone->end_io_data;
983 dm_request_endio_fn rq_end_io = NULL;
986 rq_end_io = tio->ti->type->rq_end_io;
988 if (mapped && rq_end_io)
989 r = rq_end_io(tio->ti, clone, error, &tio->info);
993 /* The target wants to complete the I/O */
994 dm_end_request(clone, r);
995 else if (r == DM_ENDIO_INCOMPLETE)
996 /* The target will handle the I/O */
998 else if (r == DM_ENDIO_REQUEUE)
999 /* The target wants to requeue the I/O */
1000 dm_requeue_unmapped_request(clone);
1002 DMWARN("unimplemented target endio return value: %d", r);
1008 * Request completion handler for request-based dm
1010 static void dm_softirq_done(struct request *rq)
1013 struct request *clone = rq->completion_data;
1014 struct dm_rq_target_io *tio = clone->end_io_data;
1016 if (rq->cmd_flags & REQ_FAILED)
1019 dm_done(clone, tio->error, mapped);
1023 * Complete the clone and the original request with the error status
1024 * through softirq context.
1026 static void dm_complete_request(struct request *clone, int error)
1028 struct dm_rq_target_io *tio = clone->end_io_data;
1029 struct request *rq = tio->orig;
1032 rq->completion_data = clone;
1033 blk_complete_request(rq);
1037 * Complete the not-mapped clone and the original request with the error status
1038 * through softirq context.
1039 * Target's rq_end_io() function isn't called.
1040 * This may be used when the target's map_rq() function fails.
1042 void dm_kill_unmapped_request(struct request *clone, int error)
1044 struct dm_rq_target_io *tio = clone->end_io_data;
1045 struct request *rq = tio->orig;
1047 rq->cmd_flags |= REQ_FAILED;
1048 dm_complete_request(clone, error);
1050 EXPORT_SYMBOL_GPL(dm_kill_unmapped_request);
1053 * Called with the queue lock held
1055 static void end_clone_request(struct request *clone, int error)
* Just clean up the information of the queue in which
* the clone was dispatched.
* The clone is *NOT* actually freed here because it is allocated from
* dm's own mempool and REQ_ALLOCED isn't set in clone->cmd_flags.
1063 __blk_put_request(clone->q, clone);
* Actual request completion is done in a softirq context which doesn't
* hold the queue lock. Otherwise, deadlock could occur because:
* - another request may be submitted by the upper level driver
*   of the stack during the completion
* - a submission which requires the queue lock may be made
*   against this queue
1073 dm_complete_request(clone, error);
* Return maximum size of I/O possible at the supplied sector up to the
* current target boundary.
1080 static sector_t max_io_len_target_boundary(sector_t sector, struct dm_target *ti)
1082 sector_t target_offset = dm_target_offset(ti, sector);
1084 return ti->len - target_offset;
1087 static sector_t max_io_len(sector_t sector, struct dm_target *ti)
1089 sector_t len = max_io_len_target_boundary(sector, ti);
1090 sector_t offset, max_len;
1093 * Does the target need to split even further?
1095 if (ti->max_io_len) {
1096 offset = dm_target_offset(ti, sector);
1097 if (unlikely(ti->max_io_len & (ti->max_io_len - 1)))
1098 max_len = sector_div(offset, ti->max_io_len);
1100 max_len = offset & (ti->max_io_len - 1);
1101 max_len = ti->max_io_len - max_len;
1110 int dm_set_target_max_io_len(struct dm_target *ti, sector_t len)
1112 if (len > UINT_MAX) {
1113 DMERR("Specified maximum size of target IO (%llu) exceeds limit (%u)",
1114 (unsigned long long)len, UINT_MAX);
1115 ti->error = "Maximum size of target IO is too large";
1119 ti->max_io_len = (uint32_t) len;
1123 EXPORT_SYMBOL_GPL(dm_set_target_max_io_len);
1125 static void __map_bio(struct dm_target_io *tio)
1129 struct mapped_device *md;
1130 struct bio *clone = &tio->clone;
1131 struct dm_target *ti = tio->ti;
1133 clone->bi_end_io = clone_endio;
1134 clone->bi_private = tio;
* Map the clone. If r == 0 we don't need to do
* anything, the target has assumed ownership of this io.
1141 atomic_inc(&tio->io->io_count);
1142 sector = clone->bi_iter.bi_sector;
1143 r = ti->type->map(ti, clone);
1144 if (r == DM_MAPIO_REMAPPED) {
1145 /* the bio has been remapped so dispatch it */
1147 trace_block_bio_remap(bdev_get_queue(clone->bi_bdev), clone,
1148 tio->io->bio->bi_bdev->bd_dev, sector);
1150 generic_make_request(clone);
1151 } else if (r < 0 || r == DM_MAPIO_REQUEUE) {
1152 /* error the io and bail out, or requeue it if needed */
1154 dec_pending(tio->io, r);
1157 DMWARN("unimplemented target map return value: %d", r);
1163 struct mapped_device *md;
1164 struct dm_table *map;
1168 sector_t sector_count;
1171 static void bio_setup_sector(struct bio *bio, sector_t sector, sector_t len)
1173 bio->bi_iter.bi_sector = sector;
1174 bio->bi_iter.bi_size = to_bytes(len);
* Creates a bio that consists of a range of complete bvecs.
1180 static void clone_bio(struct dm_target_io *tio, struct bio *bio,
1181 sector_t sector, unsigned len)
1183 struct bio *clone = &tio->clone;
1185 __bio_clone_fast(clone, bio);
1187 if (bio_integrity(bio))
1188 bio_integrity_clone(clone, bio, GFP_NOIO);
1190 bio_advance(clone, to_bytes(sector - clone->bi_iter.bi_sector));
1191 clone->bi_iter.bi_size = to_bytes(len);
1193 if (bio_integrity(bio))
1194 bio_integrity_trim(clone, 0, len);
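/*
 * Allocate a clone bio from the md's bioset; the dm_target_io is carved
 * out of the bioset's front_pad, so it travels with the clone.
 */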
1197 static struct dm_target_io *alloc_tio(struct clone_info *ci,
1198 struct dm_target *ti, int nr_iovecs,
1199 unsigned target_bio_nr)
1201 struct dm_target_io *tio;
1204 clone = bio_alloc_bioset(GFP_NOIO, nr_iovecs, ci->md->bs);
1205 tio = container_of(clone, struct dm_target_io, clone);
1209 memset(&tio->info, 0, sizeof(tio->info));
1210 tio->target_bio_nr = target_bio_nr;
1215 static void __clone_and_map_simple_bio(struct clone_info *ci,
1216 struct dm_target *ti,
1217 unsigned target_bio_nr, sector_t len)
1219 struct dm_target_io *tio = alloc_tio(ci, ti, ci->bio->bi_max_vecs, target_bio_nr);
1220 struct bio *clone = &tio->clone;
1223 * Discard requests require the bio's inline iovecs be initialized.
1224 * ci->bio->bi_max_vecs is BIO_INLINE_VECS anyway, for both flush
1225 * and discard, so no need for concern about wasted bvec allocations.
1227 __bio_clone_fast(clone, ci->bio);
1229 bio_setup_sector(clone, ci->sector, len);
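/*
 * Send num_bios identical clones of ci->bio to one target; used for empty
 * flushes, discards and WRITE SAME, where each target may want several
 * copies.
 */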
1234 static void __send_duplicate_bios(struct clone_info *ci, struct dm_target *ti,
1235 unsigned num_bios, sector_t len)
1237 unsigned target_bio_nr;
1239 for (target_bio_nr = 0; target_bio_nr < num_bios; target_bio_nr++)
1240 __clone_and_map_simple_bio(ci, ti, target_bio_nr, len);
1243 static int __send_empty_flush(struct clone_info *ci)
1245 unsigned target_nr = 0;
1246 struct dm_target *ti;
1248 BUG_ON(bio_has_data(ci->bio));
1249 while ((ti = dm_table_get_target(ci->map, target_nr++)))
1250 __send_duplicate_bios(ci, ti, ti->num_flush_bios, 0);
1255 static void __clone_and_map_data_bio(struct clone_info *ci, struct dm_target *ti,
1256 sector_t sector, unsigned len)
1258 struct bio *bio = ci->bio;
1259 struct dm_target_io *tio;
1260 unsigned target_bio_nr;
1261 unsigned num_target_bios = 1;
1264 * Does the target want to receive duplicate copies of the bio?
1266 if (bio_data_dir(bio) == WRITE && ti->num_write_bios)
1267 num_target_bios = ti->num_write_bios(ti, bio);
1269 for (target_bio_nr = 0; target_bio_nr < num_target_bios; target_bio_nr++) {
1270 tio = alloc_tio(ci, ti, 0, target_bio_nr);
1271 clone_bio(tio, bio, sector, len);
1276 typedef unsigned (*get_num_bios_fn)(struct dm_target *ti);
1278 static unsigned get_num_discard_bios(struct dm_target *ti)
1280 return ti->num_discard_bios;
1283 static unsigned get_num_write_same_bios(struct dm_target *ti)
1285 return ti->num_write_same_bios;
1288 typedef bool (*is_split_required_fn)(struct dm_target *ti);
1290 static bool is_split_required_for_discard(struct dm_target *ti)
1292 return ti->split_discard_bios;
1295 static int __send_changing_extent_only(struct clone_info *ci,
1296 get_num_bios_fn get_num_bios,
1297 is_split_required_fn is_split_required)
1299 struct dm_target *ti;
1304 ti = dm_table_find_target(ci->map, ci->sector);
1305 if (!dm_target_is_valid(ti))
1309 * Even though the device advertised support for this type of
1310 * request, that does not mean every target supports it, and
1311 * reconfiguration might also have changed that since the
1312 * check was performed.
1314 num_bios = get_num_bios ? get_num_bios(ti) : 0;
1318 if (is_split_required && !is_split_required(ti))
1319 len = min(ci->sector_count, max_io_len_target_boundary(ci->sector, ti));
1321 len = min(ci->sector_count, max_io_len(ci->sector, ti));
1323 __send_duplicate_bios(ci, ti, num_bios, len);
1326 } while (ci->sector_count -= len);
1331 static int __send_discard(struct clone_info *ci)
1333 return __send_changing_extent_only(ci, get_num_discard_bios,
1334 is_split_required_for_discard);
1337 static int __send_write_same(struct clone_info *ci)
1339 return __send_changing_extent_only(ci, get_num_write_same_bios, NULL);
1343 * Select the correct strategy for processing a non-flush bio.
1345 static int __split_and_process_non_flush(struct clone_info *ci)
1347 struct bio *bio = ci->bio;
1348 struct dm_target *ti;
1351 if (unlikely(bio->bi_rw & REQ_DISCARD))
1352 return __send_discard(ci);
1353 else if (unlikely(bio->bi_rw & REQ_WRITE_SAME))
1354 return __send_write_same(ci);
1356 ti = dm_table_find_target(ci->map, ci->sector);
1357 if (!dm_target_is_valid(ti))
1360 len = min_t(sector_t, max_io_len(ci->sector, ti), ci->sector_count);
1362 __clone_and_map_data_bio(ci, ti, ci->sector, len);
1365 ci->sector_count -= len;
1371 * Entry point to split a bio into clones and submit them to the targets.
1373 static void __split_and_process_bio(struct mapped_device *md,
1374 struct dm_table *map, struct bio *bio)
1376 struct clone_info ci;
1379 if (unlikely(!map)) {
1386 ci.io = alloc_io(md);
1388 atomic_set(&ci.io->io_count, 1);
1391 spin_lock_init(&ci.io->endio_lock);
1392 ci.sector = bio->bi_iter.bi_sector;
1394 start_io_acct(ci.io);
1396 if (bio->bi_rw & REQ_FLUSH) {
1397 ci.bio = &ci.md->flush_bio;
1398 ci.sector_count = 0;
1399 error = __send_empty_flush(&ci);
1400 /* dec_pending submits any data associated with flush */
1403 ci.sector_count = bio_sectors(bio);
1404 while (ci.sector_count && !error)
1405 error = __split_and_process_non_flush(&ci);
1408 /* drop the extra reference count */
1409 dec_pending(ci.io, error);
1411 /*-----------------------------------------------------------------
1413 *---------------------------------------------------------------*/
1415 static int dm_merge_bvec(struct request_queue *q,
1416 struct bvec_merge_data *bvm,
1417 struct bio_vec *biovec)
1419 struct mapped_device *md = q->queuedata;
1420 struct dm_table *map = dm_get_live_table_fast(md);
1421 struct dm_target *ti;
1422 sector_t max_sectors;
1428 ti = dm_table_find_target(map, bvm->bi_sector);
1429 if (!dm_target_is_valid(ti))
1433 * Find maximum amount of I/O that won't need splitting
1435 max_sectors = min(max_io_len(bvm->bi_sector, ti),
1436 (sector_t) BIO_MAX_SECTORS);
1437 max_size = (max_sectors << SECTOR_SHIFT) - bvm->bi_size;
1442 * merge_bvec_fn() returns number of bytes
1443 * it can accept at this offset
1444 * max is precomputed maximal io size
1446 if (max_size && ti->type->merge)
1447 max_size = ti->type->merge(ti, bvm, biovec, max_size);
* If the target doesn't support a merge method and some of the devices
* provided their merge_bvec method (we know this by looking at
* queue_max_hw_sectors), then we can't allow bios with multiple vector
* entries. So always set max_size to 0, and the code below allows
* just one page.
1455 else if (queue_max_hw_sectors(q) <= PAGE_SIZE >> 9)
1460 dm_put_live_table_fast(md);
1462 * Always allow an entire first page
1464 if (max_size <= biovec->bv_len && !(bvm->bi_size >> SECTOR_SHIFT))
1465 max_size = biovec->bv_len;
1471 * The request function that just remaps the bio built up by
1474 static void _dm_request(struct request_queue *q, struct bio *bio)
1476 int rw = bio_data_dir(bio);
1477 struct mapped_device *md = q->queuedata;
1480 struct dm_table *map;
1482 map = dm_get_live_table(md, &srcu_idx);
1484 cpu = part_stat_lock();
1485 part_stat_inc(cpu, &dm_disk(md)->part0, ios[rw]);
1486 part_stat_add(cpu, &dm_disk(md)->part0, sectors[rw], bio_sectors(bio));
1489 /* if we're suspended, we have to queue this io for later */
1490 if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags))) {
1491 dm_put_live_table(md, srcu_idx);
1493 if (bio_rw(bio) != READA)
1500 __split_and_process_bio(md, map, bio);
1501 dm_put_live_table(md, srcu_idx);
1505 int dm_request_based(struct mapped_device *md)
1507 return blk_queue_stackable(md->queue);
1510 static void dm_request(struct request_queue *q, struct bio *bio)
1512 struct mapped_device *md = q->queuedata;
1514 if (dm_request_based(md))
1515 blk_queue_bio(q, bio);
1517 _dm_request(q, bio);
1520 void dm_dispatch_request(struct request *rq)
1524 if (blk_queue_io_stat(rq->q))
1525 rq->cmd_flags |= REQ_IO_STAT;
1527 rq->start_time = jiffies;
1528 r = blk_insert_cloned_request(rq->q, rq);
1530 dm_complete_request(rq, r);
1532 EXPORT_SYMBOL_GPL(dm_dispatch_request);
1534 static int dm_rq_bio_constructor(struct bio *bio, struct bio *bio_orig,
1537 struct dm_rq_target_io *tio = data;
1538 struct dm_rq_clone_bio_info *info =
1539 container_of(bio, struct dm_rq_clone_bio_info, clone);
1541 info->orig = bio_orig;
1543 bio->bi_end_io = end_clone_bio;
1544 bio->bi_private = info;
1549 static int setup_clone(struct request *clone, struct request *rq,
1550 struct dm_rq_target_io *tio)
1554 r = blk_rq_prep_clone(clone, rq, tio->md->bs, GFP_ATOMIC,
1555 dm_rq_bio_constructor, tio);
1559 clone->cmd = rq->cmd;
1560 clone->cmd_len = rq->cmd_len;
1561 clone->sense = rq->sense;
1562 clone->buffer = rq->buffer;
1563 clone->end_io = end_clone_request;
1564 clone->end_io_data = tio;
1569 static struct request *clone_rq(struct request *rq, struct mapped_device *md,
1572 struct request *clone;
1573 struct dm_rq_target_io *tio;
1575 tio = alloc_rq_tio(md, gfp_mask);
1583 memset(&tio->info, 0, sizeof(tio->info));
1585 clone = &tio->clone;
1586 if (setup_clone(clone, rq, tio)) {
1596 * Called with the queue lock held.
1598 static int dm_prep_fn(struct request_queue *q, struct request *rq)
1600 struct mapped_device *md = q->queuedata;
1601 struct request *clone;
1603 if (unlikely(rq->special)) {
1604 DMWARN("Already has something in rq->special.");
1605 return BLKPREP_KILL;
1608 clone = clone_rq(rq, md, GFP_ATOMIC);
1610 return BLKPREP_DEFER;
1612 rq->special = clone;
1613 rq->cmd_flags |= REQ_DONTPREP;
1620 * 0 : the request has been processed (not requeued)
1621 * !0 : the request has been requeued
1623 static int map_request(struct dm_target *ti, struct request *clone,
1624 struct mapped_device *md)
1626 int r, requeued = 0;
1627 struct dm_rq_target_io *tio = clone->end_io_data;
1630 r = ti->type->map_rq(ti, clone, &tio->info);
1632 case DM_MAPIO_SUBMITTED:
1633 /* The target has taken the I/O to submit by itself later */
1635 case DM_MAPIO_REMAPPED:
1636 /* The target has remapped the I/O so dispatch it */
1637 trace_block_rq_remap(clone->q, clone, disk_devt(dm_disk(md)),
1638 blk_rq_pos(tio->orig));
1639 dm_dispatch_request(clone);
1641 case DM_MAPIO_REQUEUE:
1642 /* The target wants to requeue the I/O */
1643 dm_requeue_unmapped_request(clone);
1648 DMWARN("unimplemented target map return value: %d", r);
1652 /* The target wants to complete the I/O */
1653 dm_kill_unmapped_request(clone, r);
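/*
 * Dequeue the original request, account it as in flight and return the
 * clone that dm_prep_fn() attached to it.
 */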
1660 static struct request *dm_start_request(struct mapped_device *md, struct request *orig)
1662 struct request *clone;
1664 blk_start_request(orig);
1665 clone = orig->special;
1666 atomic_inc(&md->pending[rq_data_dir(clone)]);
* Hold the md reference here for the in-flight I/O.
* We can't rely on the reference count held by the device opener,
* because the device may be closed during the request completion
* when all bios are completed.
1673 * See the comment in rq_completed() too.
1681 * q->request_fn for request-based dm.
1682 * Called with the queue lock held.
1684 static void dm_request_fn(struct request_queue *q)
1686 struct mapped_device *md = q->queuedata;
1688 struct dm_table *map = dm_get_live_table(md, &srcu_idx);
1689 struct dm_target *ti;
1690 struct request *rq, *clone;
* For suspend, check blk_queue_stopped() and increment
* ->pending within a single queue_lock so as not to increment the
* number of in-flight I/Os after the queue is stopped in
* dm_suspend().
1699 while (!blk_queue_stopped(q)) {
1700 rq = blk_peek_request(q);
1704 /* always use block 0 to find the target for flushes for now */
1706 if (!(rq->cmd_flags & REQ_FLUSH))
1707 pos = blk_rq_pos(rq);
1709 ti = dm_table_find_target(map, pos);
1710 if (!dm_target_is_valid(ti)) {
* Must perform the setup that dm_done() requires
* before calling dm_kill_unmapped_request().
1715 DMERR_LIMIT("request attempted access beyond the end of device");
1716 clone = dm_start_request(md, rq);
1717 dm_kill_unmapped_request(clone, -EIO);
1721 if (ti->type->busy && ti->type->busy(ti))
1724 clone = dm_start_request(md, rq);
1726 spin_unlock(q->queue_lock);
1727 if (map_request(ti, clone, md))
1730 BUG_ON(!irqs_disabled());
1731 spin_lock(q->queue_lock);
1737 BUG_ON(!irqs_disabled());
1738 spin_lock(q->queue_lock);
1741 blk_delay_queue(q, HZ / 10);
1743 dm_put_live_table(md, srcu_idx);
1746 int dm_underlying_device_busy(struct request_queue *q)
1748 return blk_lld_busy(q);
1750 EXPORT_SYMBOL_GPL(dm_underlying_device_busy);
1752 static int dm_lld_busy(struct request_queue *q)
1755 struct mapped_device *md = q->queuedata;
1756 struct dm_table *map = dm_get_live_table_fast(md);
1758 if (!map || test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags))
1761 r = dm_table_any_busy_target(map);
1763 dm_put_live_table_fast(md);
1768 static int dm_any_congested(void *congested_data, int bdi_bits)
1771 struct mapped_device *md = congested_data;
1772 struct dm_table *map;
1774 if (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) {
1775 map = dm_get_live_table_fast(md);
* Request-based dm cares only about its own queue for
* the congestion status of the request_queue.
1781 if (dm_request_based(md))
1782 r = md->queue->backing_dev_info.state &
1785 r = dm_table_any_congested(map, bdi_bits);
1787 dm_put_live_table_fast(md);
1793 /*-----------------------------------------------------------------
1794 * An IDR is used to keep track of allocated minor numbers.
1795 *---------------------------------------------------------------*/
1796 static void free_minor(int minor)
1798 spin_lock(&_minor_lock);
1799 idr_remove(&_minor_idr, minor);
1800 spin_unlock(&_minor_lock);
1804 * See if the device with a specific minor # is free.
1806 static int specific_minor(int minor)
1810 if (minor >= (1 << MINORBITS))
1813 idr_preload(GFP_KERNEL);
1814 spin_lock(&_minor_lock);
1816 r = idr_alloc(&_minor_idr, MINOR_ALLOCED, minor, minor + 1, GFP_NOWAIT);
1818 spin_unlock(&_minor_lock);
1821 return r == -ENOSPC ? -EBUSY : r;
1825 static int next_free_minor(int *minor)
1829 idr_preload(GFP_KERNEL);
1830 spin_lock(&_minor_lock);
1832 r = idr_alloc(&_minor_idr, MINOR_ALLOCED, 0, 1 << MINORBITS, GFP_NOWAIT);
1834 spin_unlock(&_minor_lock);
1842 static const struct block_device_operations dm_blk_dops;
1844 static void dm_wq_work(struct work_struct *work);
1846 static void dm_init_md_queue(struct mapped_device *md)
* Request-based dm devices cannot be stacked on top of bio-based dm
* devices. The type of this dm device has not been decided yet.
* The type is decided when the first table is loaded.
* To prevent problematic device stacking, clear the queue flag
* for request stacking support until then.
* This queue is new, so no concurrency on the queue_flags.
1857 queue_flag_clear_unlocked(QUEUE_FLAG_STACKABLE, md->queue);
1859 md->queue->queuedata = md;
1860 md->queue->backing_dev_info.congested_fn = dm_any_congested;
1861 md->queue->backing_dev_info.congested_data = md;
1862 blk_queue_make_request(md->queue, dm_request);
1863 blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY);
1864 blk_queue_merge_bvec(md->queue, dm_merge_bvec);
1868 * Allocate and initialise a blank device with a given minor.
1870 static struct mapped_device *alloc_dev(int minor)
1873 struct mapped_device *md = kzalloc(sizeof(*md), GFP_KERNEL);
1877 DMWARN("unable to allocate device, out of memory.");
1881 if (!try_module_get(THIS_MODULE))
1882 goto bad_module_get;
1884 /* get a minor number for the dev */
1885 if (minor == DM_ANY_MINOR)
1886 r = next_free_minor(&minor);
1888 r = specific_minor(minor);
1892 r = init_srcu_struct(&md->io_barrier);
1894 goto bad_io_barrier;
1896 md->type = DM_TYPE_NONE;
1897 mutex_init(&md->suspend_lock);
1898 mutex_init(&md->type_lock);
1899 spin_lock_init(&md->deferred_lock);
1900 atomic_set(&md->holders, 1);
1901 atomic_set(&md->open_count, 0);
1902 atomic_set(&md->event_nr, 0);
1903 atomic_set(&md->uevent_seq, 0);
1904 INIT_LIST_HEAD(&md->uevent_list);
1905 spin_lock_init(&md->uevent_lock);
1907 md->queue = blk_alloc_queue(GFP_KERNEL);
1911 dm_init_md_queue(md);
1913 md->disk = alloc_disk(1);
1917 atomic_set(&md->pending[0], 0);
1918 atomic_set(&md->pending[1], 0);
1919 init_waitqueue_head(&md->wait);
1920 INIT_WORK(&md->work, dm_wq_work);
1921 init_waitqueue_head(&md->eventq);
1922 init_completion(&md->kobj_holder.completion);
1924 md->disk->major = _major;
1925 md->disk->first_minor = minor;
1926 md->disk->fops = &dm_blk_dops;
1927 md->disk->queue = md->queue;
1928 md->disk->private_data = md;
1929 sprintf(md->disk->disk_name, "dm-%d", minor);
1931 format_dev_t(md->name, MKDEV(_major, minor));
1933 md->wq = alloc_workqueue("kdmflush", WQ_MEM_RECLAIM, 0);
1937 md->bdev = bdget_disk(md->disk, 0);
1941 bio_init(&md->flush_bio);
1942 md->flush_bio.bi_bdev = md->bdev;
1943 md->flush_bio.bi_rw = WRITE_FLUSH;
1945 dm_stats_init(&md->stats);
1947 /* Populate the mapping, nobody knows we exist yet */
1948 spin_lock(&_minor_lock);
1949 old_md = idr_replace(&_minor_idr, md, minor);
1950 spin_unlock(&_minor_lock);
1952 BUG_ON(old_md != MINOR_ALLOCED);
1957 destroy_workqueue(md->wq);
1959 del_gendisk(md->disk);
1962 blk_cleanup_queue(md->queue);
1964 cleanup_srcu_struct(&md->io_barrier);
1968 module_put(THIS_MODULE);
1974 static void unlock_fs(struct mapped_device *md);
1976 static void free_dev(struct mapped_device *md)
1978 int minor = MINOR(disk_devt(md->disk));
1982 destroy_workqueue(md->wq);
1984 mempool_destroy(md->io_pool);
1986 bioset_free(md->bs);
1987 blk_integrity_unregister(md->disk);
1988 del_gendisk(md->disk);
1989 cleanup_srcu_struct(&md->io_barrier);
1992 spin_lock(&_minor_lock);
1993 md->disk->private_data = NULL;
1994 spin_unlock(&_minor_lock);
1997 blk_cleanup_queue(md->queue);
1998 dm_stats_cleanup(&md->stats);
1999 module_put(THIS_MODULE);
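/*
 * Adopt the mempools that were allocated for the table being bound, unless
 * the md already has a compatible io_pool and bioset.
 */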
2003 static void __bind_mempools(struct mapped_device *md, struct dm_table *t)
2005 struct dm_md_mempools *p = dm_table_get_md_mempools(t);
2007 if (md->io_pool && md->bs) {
2008 /* The md already has necessary mempools. */
2009 if (dm_table_get_type(t) == DM_TYPE_BIO_BASED) {
* Reload the bioset because front_pad may have changed
* when a different table was loaded.
2014 bioset_free(md->bs);
2017 } else if (dm_table_get_type(t) == DM_TYPE_REQUEST_BASED) {
* There's no need to reload with request-based dm
* because the size of front_pad doesn't change.
* Note for the future: if you do reload the bioset,
* prepped requests in the queue may refer
* to bios from the old bioset, so you must walk
* through the queue to unprep them.
2030 BUG_ON(!p || md->io_pool || md->bs);
2032 md->io_pool = p->io_pool;
/* mempool bind completed; the table no longer needs its mempools */
2039 dm_table_free_md_mempools(t);
2043 * Bind a table to the device.
2045 static void event_callback(void *context)
2047 unsigned long flags;
2049 struct mapped_device *md = (struct mapped_device *) context;
2051 spin_lock_irqsave(&md->uevent_lock, flags);
2052 list_splice_init(&md->uevent_list, &uevents);
2053 spin_unlock_irqrestore(&md->uevent_lock, flags);
2055 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
2057 atomic_inc(&md->event_nr);
2058 wake_up(&md->eventq);
2062 * Protected by md->suspend_lock obtained by dm_swap_table().
2064 static void __set_size(struct mapped_device *md, sector_t size)
2066 set_capacity(md->disk, size);
2068 i_size_write(md->bdev->bd_inode, (loff_t)size << SECTOR_SHIFT);
2072 * Return 1 if the queue has a compulsory merge_bvec_fn function.
2074 * If this function returns 0, then the device is either a non-dm
2075 * device without a merge_bvec_fn, or it is a dm device that is
2076 * able to split any bios it receives that are too big.
2078 int dm_queue_merge_is_compulsory(struct request_queue *q)
2080 struct mapped_device *dev_md;
2082 if (!q->merge_bvec_fn)
2085 if (q->make_request_fn == dm_request) {
2086 dev_md = q->queuedata;
2087 if (test_bit(DMF_MERGE_IS_OPTIONAL, &dev_md->flags))
2094 static int dm_device_merge_is_compulsory(struct dm_target *ti,
2095 struct dm_dev *dev, sector_t start,
2096 sector_t len, void *data)
2098 struct block_device *bdev = dev->bdev;
2099 struct request_queue *q = bdev_get_queue(bdev);
2101 return dm_queue_merge_is_compulsory(q);
2105 * Return 1 if it is acceptable to ignore merge_bvec_fn based
2106 * on the properties of the underlying devices.
2108 static int dm_table_merge_is_optional(struct dm_table *table)
2111 struct dm_target *ti;
2113 while (i < dm_table_get_num_targets(table)) {
2114 ti = dm_table_get_target(table, i++);
2116 if (ti->type->iterate_devices &&
2117 ti->type->iterate_devices(ti, dm_device_merge_is_compulsory, NULL))
2125 * Returns old map, which caller must destroy.
2127 static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
2128 struct queue_limits *limits)
2130 struct dm_table *old_map;
2131 struct request_queue *q = md->queue;
2133 int merge_is_optional;
2135 size = dm_table_get_size(t);
2138 * Wipe any geometry if the size of the table changed.
2140 if (size != dm_get_size(md))
2141 memset(&md->geometry, 0, sizeof(md->geometry));
2143 __set_size(md, size);
2145 dm_table_event_callback(t, event_callback, md);
* If the old table type wasn't request-based, the queue hasn't been
* stopped yet during suspension, so stop it here to prevent I/O from
* being mapped before resume.
* This must be done before setting the queue restrictions,
* because request-based dm may start running right after they are set.
2154 if (dm_table_request_based(t) && !blk_queue_stopped(q))
2157 __bind_mempools(md, t);
2159 merge_is_optional = dm_table_merge_is_optional(t);
2162 rcu_assign_pointer(md->map, t);
2163 md->immutable_target_type = dm_table_get_immutable_target_type(t);
2165 dm_table_set_restrictions(t, q, limits);
2166 if (merge_is_optional)
2167 set_bit(DMF_MERGE_IS_OPTIONAL, &md->flags);
2169 clear_bit(DMF_MERGE_IS_OPTIONAL, &md->flags);
2176 * Returns unbound table for the caller to free.
2178 static struct dm_table *__unbind(struct mapped_device *md)
2180 struct dm_table *map = md->map;
2185 dm_table_event_callback(map, NULL, NULL);
2186 rcu_assign_pointer(md->map, NULL);
2193 * Constructor for a new device.
2195 int dm_create(int minor, struct mapped_device **result)
2197 struct mapped_device *md;
2199 md = alloc_dev(minor);
2210 * Functions to manage md->type.
2211 * All are required to hold md->type_lock.
2213 void dm_lock_md_type(struct mapped_device *md)
2215 mutex_lock(&md->type_lock);
2218 void dm_unlock_md_type(struct mapped_device *md)
2220 mutex_unlock(&md->type_lock);
2223 void dm_set_md_type(struct mapped_device *md, unsigned type)
2225 BUG_ON(!mutex_is_locked(&md->type_lock));
2229 unsigned dm_get_md_type(struct mapped_device *md)
2231 BUG_ON(!mutex_is_locked(&md->type_lock));
2235 struct target_type *dm_get_immutable_target_type(struct mapped_device *md)
2237 return md->immutable_target_type;
* The queue_limits are only valid as long as you have a reference
* count on 'md'.
2244 struct queue_limits *dm_get_queue_limits(struct mapped_device *md)
2246 BUG_ON(!atomic_read(&md->holders));
2247 return &md->queue->limits;
2249 EXPORT_SYMBOL_GPL(dm_get_queue_limits);
2252 * Fully initialize a request-based queue (->elevator, ->request_fn, etc).
2254 static int dm_init_request_based_queue(struct mapped_device *md)
2256 struct request_queue *q = NULL;
2258 if (md->queue->elevator)
2261 /* Fully initialize the queue */
2262 q = blk_init_allocated_queue(md->queue, dm_request_fn, NULL);
2267 dm_init_md_queue(md);
2268 blk_queue_softirq_done(md->queue, dm_softirq_done);
2269 blk_queue_prep_rq(md->queue, dm_prep_fn);
2270 blk_queue_lld_busy(md->queue, dm_lld_busy);
2272 elv_register_queue(md->queue);
* Set up the DM device's queue based on md's type
2280 int dm_setup_md_queue(struct mapped_device *md)
2282 if ((dm_get_md_type(md) == DM_TYPE_REQUEST_BASED) &&
2283 !dm_init_request_based_queue(md)) {
2284 DMWARN("Cannot initialize queue for request-based mapped device");
2291 static struct mapped_device *dm_find_md(dev_t dev)
2293 struct mapped_device *md;
2294 unsigned minor = MINOR(dev);
2296 if (MAJOR(dev) != _major || minor >= (1 << MINORBITS))
2299 spin_lock(&_minor_lock);
2301 md = idr_find(&_minor_idr, minor);
2302 if (md && (md == MINOR_ALLOCED ||
2303 (MINOR(disk_devt(dm_disk(md))) != minor) ||
2304 dm_deleting_md(md) ||
2305 test_bit(DMF_FREEING, &md->flags))) {
2311 spin_unlock(&_minor_lock);
2316 struct mapped_device *dm_get_md(dev_t dev)
2318 struct mapped_device *md = dm_find_md(dev);
2325 EXPORT_SYMBOL_GPL(dm_get_md);
2327 void *dm_get_mdptr(struct mapped_device *md)
2329 return md->interface_ptr;
2332 void dm_set_mdptr(struct mapped_device *md, void *ptr)
2334 md->interface_ptr = ptr;
2337 void dm_get(struct mapped_device *md)
2339 atomic_inc(&md->holders);
2340 BUG_ON(test_bit(DMF_FREEING, &md->flags));
2343 const char *dm_device_name(struct mapped_device *md)
2347 EXPORT_SYMBOL_GPL(dm_device_name);
2349 static void __dm_destroy(struct mapped_device *md, bool wait)
2351 struct dm_table *map;
2356 spin_lock(&_minor_lock);
2357 map = dm_get_live_table(md, &srcu_idx);
2358 idr_replace(&_minor_idr, MINOR_ALLOCED, MINOR(disk_devt(dm_disk(md))));
2359 set_bit(DMF_FREEING, &md->flags);
2360 spin_unlock(&_minor_lock);
2362 if (!dm_suspended_md(md)) {
2363 dm_table_presuspend_targets(map);
2364 dm_table_postsuspend_targets(map);
2367 /* dm_put_live_table must be before msleep, otherwise deadlock is possible */
2368 dm_put_live_table(md, srcu_idx);
* Rare, but there may still be I/O requests that have yet to complete.
* Wait for all references to disappear.
* No one should increment the reference count of the mapped_device
* after its state becomes DMF_FREEING.
2377 while (atomic_read(&md->holders))
2379 else if (atomic_read(&md->holders))
2380 DMWARN("%s: Forcibly removing mapped_device still in use! (%d users)",
2381 dm_device_name(md), atomic_read(&md->holders));
2384 dm_table_destroy(__unbind(md));
2388 void dm_destroy(struct mapped_device *md)
2390 __dm_destroy(md, true);
2393 void dm_destroy_immediate(struct mapped_device *md)
2395 __dm_destroy(md, false);
2398 void dm_put(struct mapped_device *md)
2400 atomic_dec(&md->holders);
2402 EXPORT_SYMBOL_GPL(dm_put);
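/*
 * Sleep on md->wait until no io remains in flight; with TASK_INTERRUPTIBLE
 * a pending signal ends the wait early.
 */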
2404 static int dm_wait_for_completion(struct mapped_device *md, int interruptible)
2407 DECLARE_WAITQUEUE(wait, current);
2409 add_wait_queue(&md->wait, &wait);
2412 set_current_state(interruptible);
2414 if (!md_in_flight(md))
2417 if (interruptible == TASK_INTERRUPTIBLE &&
2418 signal_pending(current)) {
2425 set_current_state(TASK_RUNNING);
2427 remove_wait_queue(&md->wait, &wait);
2433 * Process the deferred bios
2435 static void dm_wq_work(struct work_struct *work)
2437 struct mapped_device *md = container_of(work, struct mapped_device,
2441 struct dm_table *map;
2443 map = dm_get_live_table(md, &srcu_idx);
2445 while (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) {
2446 spin_lock_irq(&md->deferred_lock);
2447 c = bio_list_pop(&md->deferred);
2448 spin_unlock_irq(&md->deferred_lock);
2453 if (dm_request_based(md))
2454 generic_make_request(c);
2456 __split_and_process_bio(md, map, c);
2459 dm_put_live_table(md, srcu_idx);
2462 static void dm_queue_flush(struct mapped_device *md)
2464 clear_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
2465 smp_mb__after_clear_bit();
2466 queue_work(md->wq, &md->work);
2470 * Swap in a new table, returning the old one for the caller to destroy.
2472 struct dm_table *dm_swap_table(struct mapped_device *md, struct dm_table *table)
2474 struct dm_table *live_map = NULL, *map = ERR_PTR(-EINVAL);
2475 struct queue_limits limits;
2478 mutex_lock(&md->suspend_lock);
2480 /* device must be suspended */
2481 if (!dm_suspended_md(md))
* If the new table has no data devices, retain the existing limits.
* This helps multipath with queue_if_no_path if all paths disappear,
* then new I/O is queued based on these limits, and then some paths
* reappear.
2490 if (dm_table_has_no_data_devices(table)) {
2491 live_map = dm_get_live_table_fast(md);
2493 limits = md->queue->limits;
2494 dm_put_live_table_fast(md);
2498 r = dm_calculate_queue_limits(table, &limits);
2505 map = __bind(md, table, &limits);
2508 mutex_unlock(&md->suspend_lock);
* Functions to lock and unlock any filesystem running on the
* device.
2516 static int lock_fs(struct mapped_device *md)
2520 WARN_ON(md->frozen_sb);
2522 md->frozen_sb = freeze_bdev(md->bdev);
2523 if (IS_ERR(md->frozen_sb)) {
2524 r = PTR_ERR(md->frozen_sb);
2525 md->frozen_sb = NULL;
2529 set_bit(DMF_FROZEN, &md->flags);
2534 static void unlock_fs(struct mapped_device *md)
2536 if (!test_bit(DMF_FROZEN, &md->flags))
2539 thaw_bdev(md->bdev, md->frozen_sb);
2540 md->frozen_sb = NULL;
2541 clear_bit(DMF_FROZEN, &md->flags);
* We need to be able to change a mapping table under a mounted
* filesystem. For example, we might want to move some data in
* the background. Before the table can be swapped with
* dm_bind_table, dm_suspend must be called to flush any in-flight
* bios and ensure that any further io gets deferred.
2552 * Suspend mechanism in request-based dm.
2554 * 1. Flush all I/Os by lock_fs() if needed.
2555 * 2. Stop dispatching any I/O by stopping the request_queue.
2556 * 3. Wait for all in-flight I/Os to be completed or requeued.
2558 * To abort suspend, start the request_queue.
2560 int dm_suspend(struct mapped_device *md, unsigned suspend_flags)
2562 struct dm_table *map = NULL;
2564 int do_lockfs = suspend_flags & DM_SUSPEND_LOCKFS_FLAG ? 1 : 0;
2565 int noflush = suspend_flags & DM_SUSPEND_NOFLUSH_FLAG ? 1 : 0;
2567 mutex_lock(&md->suspend_lock);
2569 if (dm_suspended_md(md)) {
2577 * DMF_NOFLUSH_SUSPENDING must be set before presuspend.
2578 * This flag is cleared before dm_suspend returns.
2581 set_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
2583 /* This does not get reverted if there's an error later. */
2584 dm_table_presuspend_targets(map);
2587 * Flush I/O to the device.
2588 * Any I/O submitted after lock_fs() may not be flushed.
2589 * noflush takes precedence over do_lockfs.
2590 * (lock_fs() flushes I/Os and waits for them to complete.)
2592 if (!noflush && do_lockfs) {
* Here we must make sure that no processes are submitting requests
* to target drivers i.e. no one may be executing
* __split_and_process_bio. This is called from dm_request and
* dm_wq_work.
*
* To get all processes out of __split_and_process_bio in dm_request,
* we take the write lock. To prevent any process from reentering
* __split_and_process_bio from dm_request and quiesce the thread
* (dm_wq_work), we set DMF_BLOCK_IO_FOR_SUSPEND and call
* flush_workqueue(md->wq).
2610 set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
2611 synchronize_srcu(&md->io_barrier);
2614 * Stop md->queue before flushing md->wq in case request-based
2615 * dm defers requests to md->wq from md->queue.
2617 if (dm_request_based(md))
2618 stop_queue(md->queue);
2620 flush_workqueue(md->wq);
* At this point no more requests are entering target request routines.
* We call dm_wait_for_completion to wait for all existing requests
* to finish.
2627 r = dm_wait_for_completion(md, TASK_INTERRUPTIBLE);
2630 clear_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
2631 synchronize_srcu(&md->io_barrier);
/* were we interrupted? */
2637 if (dm_request_based(md))
2638 start_queue(md->queue);
2641 goto out_unlock; /* pushback list is already flushed, so skip flush */
* If dm_wait_for_completion returned 0, the device is completely
* quiescent now. There is no request-processing activity. All new
* requests are being added to the md->deferred list.
2650 set_bit(DMF_SUSPENDED, &md->flags);
2652 dm_table_postsuspend_targets(map);
2655 mutex_unlock(&md->suspend_lock);
2659 int dm_resume(struct mapped_device *md)
2662 struct dm_table *map = NULL;
2664 mutex_lock(&md->suspend_lock);
2665 if (!dm_suspended_md(md))
2669 if (!map || !dm_table_get_size(map))
2672 r = dm_table_resume_targets(map);
* Flushing deferred I/Os must be done after targets are resumed
* so that the targets can map them correctly.
* Request-based dm queues the deferred I/Os in its request_queue.
2683 if (dm_request_based(md))
2684 start_queue(md->queue);
2688 clear_bit(DMF_SUSPENDED, &md->flags);
2692 mutex_unlock(&md->suspend_lock);
2698 * Internal suspend/resume works like userspace-driven suspend. It waits
2699 * until all bios finish and prevents issuing new bios to the target drivers.
2700 * It may be used only from the kernel.
2702 * Internal suspend holds md->suspend_lock, which prevents interaction with
2703 * userspace-driven suspend.
2706 void dm_internal_suspend(struct mapped_device *md)
2708 mutex_lock(&md->suspend_lock);
2709 if (dm_suspended_md(md))
2712 set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
2713 synchronize_srcu(&md->io_barrier);
2714 flush_workqueue(md->wq);
2715 dm_wait_for_completion(md, TASK_UNINTERRUPTIBLE);
2718 void dm_internal_resume(struct mapped_device *md)
2720 if (dm_suspended_md(md))
2726 mutex_unlock(&md->suspend_lock);
2729 /*-----------------------------------------------------------------
2730 * Event notification.
2731 *---------------------------------------------------------------*/
2732 int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
2735 char udev_cookie[DM_COOKIE_LENGTH];
2736 char *envp[] = { udev_cookie, NULL };
2739 return kobject_uevent(&disk_to_dev(md->disk)->kobj, action);
2741 snprintf(udev_cookie, DM_COOKIE_LENGTH, "%s=%u",
2742 DM_COOKIE_ENV_VAR_NAME, cookie);
2743 return kobject_uevent_env(&disk_to_dev(md->disk)->kobj,
2748 uint32_t dm_next_uevent_seq(struct mapped_device *md)
2750 return atomic_add_return(1, &md->uevent_seq);
2753 uint32_t dm_get_event_nr(struct mapped_device *md)
2755 return atomic_read(&md->event_nr);
2758 int dm_wait_event(struct mapped_device *md, int event_nr)
2760 return wait_event_interruptible(md->eventq,
2761 (event_nr != atomic_read(&md->event_nr)));
2764 void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
2766 unsigned long flags;
2768 spin_lock_irqsave(&md->uevent_lock, flags);
2769 list_add(elist, &md->uevent_list);
2770 spin_unlock_irqrestore(&md->uevent_lock, flags);
* The gendisk is only valid as long as you have a reference
* count on 'md'.
2777 struct gendisk *dm_disk(struct mapped_device *md)
2782 struct kobject *dm_kobject(struct mapped_device *md)
2784 return &md->kobj_holder.kobj;
2787 struct mapped_device *dm_get_from_kobject(struct kobject *kobj)
2789 struct mapped_device *md;
2791 md = container_of(kobj, struct mapped_device, kobj_holder.kobj);
2793 if (test_bit(DMF_FREEING, &md->flags) ||
2801 int dm_suspended_md(struct mapped_device *md)
2803 return test_bit(DMF_SUSPENDED, &md->flags);
2806 int dm_test_deferred_remove_flag(struct mapped_device *md)
2808 return test_bit(DMF_DEFERRED_REMOVE, &md->flags);
2811 int dm_suspended(struct dm_target *ti)
2813 return dm_suspended_md(dm_table_get_md(ti->table));
2815 EXPORT_SYMBOL_GPL(dm_suspended);
2817 int dm_noflush_suspending(struct dm_target *ti)
2819 return __noflush_suspending(dm_table_get_md(ti->table));
2821 EXPORT_SYMBOL_GPL(dm_noflush_suspending);
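/*
 * Create the io mempool and bioset a new table will need; the pool size
 * and bioset front_pad depend on whether the table is bio-based or
 * request-based.
 */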
2823 struct dm_md_mempools *dm_alloc_md_mempools(unsigned type, unsigned integrity, unsigned per_bio_data_size)
2825 struct dm_md_mempools *pools = kzalloc(sizeof(*pools), GFP_KERNEL);
2826 struct kmem_cache *cachep;
2827 unsigned int pool_size;
2828 unsigned int front_pad;
2833 if (type == DM_TYPE_BIO_BASED) {
2835 pool_size = dm_get_reserved_bio_based_ios();
2836 front_pad = roundup(per_bio_data_size, __alignof__(struct dm_target_io)) + offsetof(struct dm_target_io, clone);
2837 } else if (type == DM_TYPE_REQUEST_BASED) {
2838 cachep = _rq_tio_cache;
2839 pool_size = dm_get_reserved_rq_based_ios();
2840 front_pad = offsetof(struct dm_rq_clone_bio_info, clone);
2841 /* per_bio_data_size is not used. See __bind_mempools(). */
2842 WARN_ON(per_bio_data_size != 0);
2846 pools->io_pool = mempool_create_slab_pool(pool_size, cachep);
2847 if (!pools->io_pool)
2850 pools->bs = bioset_create(pool_size, front_pad);
2854 if (integrity && bioset_integrity_create(pools->bs, pool_size))
2860 dm_free_md_mempools(pools);
2865 void dm_free_md_mempools(struct dm_md_mempools *pools)
2871 mempool_destroy(pools->io_pool);
2874 bioset_free(pools->bs);
2879 static const struct block_device_operations dm_blk_dops = {
2880 .open = dm_blk_open,
2881 .release = dm_blk_close,
2882 .ioctl = dm_blk_ioctl,
2883 .getgeo = dm_blk_getgeo,
2884 .owner = THIS_MODULE
2887 EXPORT_SYMBOL(dm_get_mapinfo);
2892 module_init(dm_init);
2893 module_exit(dm_exit);
2895 module_param(major, uint, 0);
2896 MODULE_PARM_DESC(major, "The major number of the device mapper");
2898 module_param(reserved_bio_based_ios, uint, S_IRUGO | S_IWUSR);
2899 MODULE_PARM_DESC(reserved_bio_based_ios, "Reserved IOs in bio-based mempools");
2901 module_param(reserved_rq_based_ios, uint, S_IRUGO | S_IWUSR);
2902 MODULE_PARM_DESC(reserved_rq_based_ios, "Reserved IOs in request-based mempools");
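/*
 * Both reserved_*_ios parameters may also be changed at runtime through
 * sysfs, e.g. (assuming the core is loaded as dm_mod):
 *   echo 512 > /sys/module/dm_mod/parameters/reserved_rq_based_ios
 */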
2904 MODULE_DESCRIPTION(DM_NAME " driver");
2905 MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
2906 MODULE_LICENSE("GPL");