2 * Copyright (C) 2011-2012 Red Hat UK.
4 * This file is released under the GPL.
7 #include "dm-thin-metadata.h"
10 #include <linux/device-mapper.h>
11 #include <linux/dm-io.h>
12 #include <linux/dm-kcopyd.h>
13 #include <linux/list.h>
14 #include <linux/init.h>
15 #include <linux/module.h>
16 #include <linux/slab.h>
18 #define DM_MSG_PREFIX "thin"
23 #define ENDIO_HOOK_POOL_SIZE 1024
24 #define DEFERRED_SET_SIZE 64
25 #define MAPPING_POOL_SIZE 1024
26 #define PRISON_CELLS 1024
27 #define COMMIT_PERIOD HZ
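/*
 * COMMIT_PERIOD is in jiffies, so HZ means outstanding metadata changes
 * get committed roughly once a second; see need_commit_due_to_time()
 * and do_waker() below.
 */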
30 * The block size of the device holding pool data must be
31 * between 64KB and 1GB.
33 #define DATA_DEV_BLOCK_SIZE_MIN_SECTORS (64 * 1024 >> SECTOR_SHIFT)
34 #define DATA_DEV_BLOCK_SIZE_MAX_SECTORS (1024 * 1024 * 1024 >> SECTOR_SHIFT)
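/*
 * Worked example of the limits above, assuming the usual 512-byte
 * sectors (SECTOR_SHIFT == 9): the minimum is (64 * 1024) >> 9 == 128
 * sectors and the maximum is (1024 * 1024 * 1024) >> 9 == 2097152
 * sectors.
 */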
37 * Device id is restricted to 24 bits.
39 #define MAX_DEV_ID ((1 << 24) - 1)
42 * How do we handle breaking sharing of data blocks?
43 * =================================================
45 * We use a standard copy-on-write btree to store the mappings for the
46 * devices (note I'm talking about copy-on-write of the metadata here, not
47 * the data). When you take an internal snapshot you clone the root node
48 * of the origin btree. After this there is no concept of an origin or a
49 * snapshot. They are just two device trees that happen to point to the same data blocks.
52 * When we get a write in we decide if it's to a shared data block using
53 * some timestamp magic. If it is, we have to break sharing.
55 * Let's say we write to a shared block in what was the origin. The
58 * i) plug any further io to this physical block (see the bio_prison code).
60 * ii) quiesce any read io to that shared data block. Obviously
61 * including all devices that share this block. (see dm_deferred_set code)
63 * iii) copy the data block to a newly allocated block. This step can be
64 * skipped if the io covers the whole block (schedule_copy).
66 * iv) insert the new mapping into the origin's btree
67 * (process_prepared_mapping). This act of inserting breaks some
68 * sharing of btree nodes between the two devices. Breaking sharing only
69 * affects the btree of that specific device. Btrees for the other
70 * devices that share the block never change. The btree for the origin
71 * device as it was after the last commit is untouched, i.e. we're using
72 * persistent data structures in the functional programming sense.
74 * v) unplug io to this physical block, including the io that triggered
75 * the breaking of sharing.
77 * Steps (ii) and (iii) occur in parallel.
79 * The metadata _doesn't_ need to be committed before the io continues. We
80 * get away with this because the io is always written to a _new_ block.
81 * If there's a crash, then:
83 * - The origin mapping will point to the old origin block (the shared
84 * one). This will contain the data as it was before the io that triggered
85 * the breaking of sharing came in.
87 * - The snap mapping still points to the old block. As it would after the commit.
90 * The downside of this scheme is that the timestamp magic isn't perfect, and
91 * will continue to think that the data block in the snapshot device is shared
92 * even after the write to the origin has broken sharing. I suspect data
93 * blocks will typically be shared by many different devices, so we're
94 * breaking sharing n + 1 times, rather than n, where n is the number of
95 * devices that reference this data block. At the moment I think the
96 * benefits far, far outweigh the disadvantages.
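 *
 * As a rough sketch only (locking, error handling and the real control
 * flow are simplified), the steps above correspond to calls defined
 * later in this file:
 *
 *	(i)   dm_bio_detain(pool->prison, &key, bio, &cell)
 *	(ii)  dm_deferred_set_add_work(pool->shared_read_ds, &m->list)
 *	(iii) schedule_copy(tc, virt_block, ..., data_dest, cell, bio)
 *	(iv)  process_prepared_mapping(m)
 *	(v)   cell_defer(tc, m->cell, m->data_block)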
99 /*----------------------------------------------------------------*/
102 * Sometimes we can't deal with a bio straight away, so we put it in prison
103 * where it can't cause any mischief. Bios are put in a cell identified
104 * by a key; multiple bios can be in the same cell. When the cell is
105 * subsequently unlocked, the bios become available.
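 *
 * Typical usage, as a simplified sketch of the callers further down
 * (error handling omitted):
 *
 *	struct dm_cell_key key;
 *	struct dm_bio_prison_cell *cell;
 *
 *	build_virtual_key(tc->td, block, &key);
 *	if (dm_bio_detain(pool->prison, &key, bio, &cell))
 *		return;		// someone already holds the cell; bio now waits in it
 *	// ... do the work for this block ...
 *	dm_cell_release(cell, &pool->deferred_bios);	// unlock and requeue the bios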
107 struct dm_bio_prison;
115 struct dm_bio_prison_cell {
116 struct hlist_node list;
117 struct dm_bio_prison *prison;
118 struct dm_cell_key key;
120 struct bio_list bios;
123 struct dm_bio_prison {
125 mempool_t *cell_pool;
129 struct hlist_head *cells;
132 static uint32_t calc_nr_buckets(unsigned nr_cells)
137 nr_cells = min(nr_cells, 8192u);
145 static struct kmem_cache *_cell_cache;
148 * @nr_cells should be the number of cells you want in use _concurrently_.
149 * Don't confuse it with the number of distinct keys.
151 static struct dm_bio_prison *dm_bio_prison_create(unsigned nr_cells)
154 uint32_t nr_buckets = calc_nr_buckets(nr_cells);
155 size_t len = sizeof(struct dm_bio_prison) +
156 (sizeof(struct hlist_head) * nr_buckets);
157 struct dm_bio_prison *prison = kmalloc(len, GFP_KERNEL);
162 spin_lock_init(&prison->lock);
163 prison->cell_pool = mempool_create_slab_pool(nr_cells, _cell_cache);
164 if (!prison->cell_pool) {
169 prison->nr_buckets = nr_buckets;
170 prison->hash_mask = nr_buckets - 1;
171 prison->cells = (struct hlist_head *) (prison + 1);
172 for (i = 0; i < nr_buckets; i++)
173 INIT_HLIST_HEAD(prison->cells + i);
178 static void dm_bio_prison_destroy(struct dm_bio_prison *prison)
180 mempool_destroy(prison->cell_pool);
184 static uint32_t hash_key(struct dm_bio_prison *prison, struct dm_cell_key *key)
186 const unsigned long BIG_PRIME = 4294967291UL;
187 uint64_t hash = key->block * BIG_PRIME;
189 return (uint32_t) (hash & prison->hash_mask);
192 static int keys_equal(struct dm_cell_key *lhs, struct dm_cell_key *rhs)
194 return (lhs->virtual == rhs->virtual) &&
195 (lhs->dev == rhs->dev) &&
196 (lhs->block == rhs->block);
199 static struct dm_bio_prison_cell *__search_bucket(struct hlist_head *bucket,
200 struct dm_cell_key *key)
202 struct dm_bio_prison_cell *cell;
203 struct hlist_node *tmp;
205 hlist_for_each_entry(cell, tmp, bucket, list)
206 if (keys_equal(&cell->key, key))
213 * This may block if a new cell needs allocating. You must ensure that
214 * cells will be unlocked even if the calling thread is blocked.
216 * Returns 1 if the cell was already held, 0 if @inmate is the new holder.
218 static int dm_bio_detain(struct dm_bio_prison *prison, struct dm_cell_key *key,
219 struct bio *inmate, struct dm_bio_prison_cell **ref)
223 uint32_t hash = hash_key(prison, key);
224 struct dm_bio_prison_cell *cell, *cell2;
226 BUG_ON(hash >= prison->nr_buckets);
228 spin_lock_irqsave(&prison->lock, flags);
230 cell = __search_bucket(prison->cells + hash, key);
232 bio_list_add(&cell->bios, inmate);
237 * Allocate a new cell
239 spin_unlock_irqrestore(&prison->lock, flags);
240 cell2 = mempool_alloc(prison->cell_pool, GFP_NOIO);
241 spin_lock_irqsave(&prison->lock, flags);
244 * We've been unlocked, so we have to double check that
245 * nobody else has inserted this cell in the meantime.
247 cell = __search_bucket(prison->cells + hash, key);
249 mempool_free(cell2, prison->cell_pool);
250 bio_list_add(&cell->bios, inmate);
259 cell->prison = prison;
260 memcpy(&cell->key, key, sizeof(cell->key));
261 cell->holder = inmate;
262 bio_list_init(&cell->bios);
263 hlist_add_head(&cell->list, prison->cells + hash);
268 spin_unlock_irqrestore(&prison->lock, flags);
276 * @inmates must have been initialised prior to this call
278 static void __cell_release(struct dm_bio_prison_cell *cell, struct bio_list *inmates)
280 struct dm_bio_prison *prison = cell->prison;
282 hlist_del(&cell->list);
285 bio_list_add(inmates, cell->holder);
286 bio_list_merge(inmates, &cell->bios);
289 mempool_free(cell, prison->cell_pool);
292 static void dm_cell_release(struct dm_bio_prison_cell *cell, struct bio_list *bios)
295 struct dm_bio_prison *prison = cell->prison;
297 spin_lock_irqsave(&prison->lock, flags);
298 __cell_release(cell, bios);
299 spin_unlock_irqrestore(&prison->lock, flags);
303 * There are a couple of places where we put a bio into a cell briefly
304 * before taking it out again. In these situations we know that no other
305 * bio may be in the cell. This function releases the cell, and also does a sanity check.
308 static void __cell_release_singleton(struct dm_bio_prison_cell *cell, struct bio *bio)
310 BUG_ON(cell->holder != bio);
311 BUG_ON(!bio_list_empty(&cell->bios));
313 __cell_release(cell, NULL);
316 static void dm_cell_release_singleton(struct dm_bio_prison_cell *cell, struct bio *bio)
319 struct dm_bio_prison *prison = cell->prison;
321 spin_lock_irqsave(&prison->lock, flags);
322 __cell_release_singleton(cell, bio);
323 spin_unlock_irqrestore(&prison->lock, flags);
327 * Sometimes we don't want the holder, just the additional bios.
329 static void __cell_release_no_holder(struct dm_bio_prison_cell *cell,
330 struct bio_list *inmates)
332 struct dm_bio_prison *prison = cell->prison;
334 hlist_del(&cell->list);
335 bio_list_merge(inmates, &cell->bios);
337 mempool_free(cell, prison->cell_pool);
340 static void dm_cell_release_no_holder(struct dm_bio_prison_cell *cell,
341 struct bio_list *inmates)
344 struct dm_bio_prison *prison = cell->prison;
346 spin_lock_irqsave(&prison->lock, flags);
347 __cell_release_no_holder(cell, inmates);
348 spin_unlock_irqrestore(&prison->lock, flags);
351 static void dm_cell_error(struct dm_bio_prison_cell *cell)
353 struct dm_bio_prison *prison = cell->prison;
354 struct bio_list bios;
358 bio_list_init(&bios);
360 spin_lock_irqsave(&prison->lock, flags);
361 __cell_release(cell, &bios);
362 spin_unlock_irqrestore(&prison->lock, flags);
364 while ((bio = bio_list_pop(&bios)))
368 /*----------------------------------------------------------------*/
371 * We use the deferred set to keep track of pending reads to shared blocks.
372 * We do this to ensure the new mapping caused by a write isn't performed
373 * until these prior reads have completed. Otherwise the insertion of the
374 * new mapping could free the old block that the read bios are mapped to.
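 *
 * Rough usage sketch (simplified; in this driver the inc happens in
 * process_shared_bio()/thin_hook_bio() and the dec in the endio path,
 * which is not shown in this excerpt):
 *
 *	entry = dm_deferred_entry_inc(pool->shared_read_ds);
 *	// ... issue the read to the shared block ...
 *	INIT_LIST_HEAD(&work);
 *	dm_deferred_entry_dec(entry, &work);
 *	// work now holds any deferred items that became runnable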
377 struct dm_deferred_set;
378 struct dm_deferred_entry {
379 struct dm_deferred_set *ds;
381 struct list_head work_items;
384 struct dm_deferred_set {
386 unsigned current_entry;
388 struct dm_deferred_entry entries[DEFERRED_SET_SIZE];
391 static struct dm_deferred_set *dm_deferred_set_create(void)
394 struct dm_deferred_set *ds;
396 ds = kmalloc(sizeof(*ds), GFP_KERNEL);
400 spin_lock_init(&ds->lock);
401 ds->current_entry = 0;
403 for (i = 0; i < DEFERRED_SET_SIZE; i++) {
404 ds->entries[i].ds = ds;
405 ds->entries[i].count = 0;
406 INIT_LIST_HEAD(&ds->entries[i].work_items);
412 static void dm_deferred_set_destroy(struct dm_deferred_set *ds)
417 static struct dm_deferred_entry *dm_deferred_entry_inc(struct dm_deferred_set *ds)
420 struct dm_deferred_entry *entry;
422 spin_lock_irqsave(&ds->lock, flags);
423 entry = ds->entries + ds->current_entry;
425 spin_unlock_irqrestore(&ds->lock, flags);
430 static unsigned ds_next(unsigned index)
432 return (index + 1) % DEFERRED_SET_SIZE;
435 static void __sweep(struct dm_deferred_set *ds, struct list_head *head)
437 while ((ds->sweeper != ds->current_entry) &&
438 !ds->entries[ds->sweeper].count) {
439 list_splice_init(&ds->entries[ds->sweeper].work_items, head);
440 ds->sweeper = ds_next(ds->sweeper);
443 if ((ds->sweeper == ds->current_entry) && !ds->entries[ds->sweeper].count)
444 list_splice_init(&ds->entries[ds->sweeper].work_items, head);
447 static void dm_deferred_entry_dec(struct dm_deferred_entry *entry, struct list_head *head)
451 spin_lock_irqsave(&entry->ds->lock, flags);
452 BUG_ON(!entry->count);
454 __sweep(entry->ds, head);
455 spin_unlock_irqrestore(&entry->ds->lock, flags);
459 * Returns 1 if the work was deferred, or 0 if there were no pending items to delay the job.
461 static int dm_deferred_set_add_work(struct dm_deferred_set *ds, struct list_head *work)
467 spin_lock_irqsave(&ds->lock, flags);
468 if ((ds->sweeper == ds->current_entry) &&
469 !ds->entries[ds->current_entry].count)
472 list_add(work, &ds->entries[ds->current_entry].work_items);
473 next_entry = ds_next(ds->current_entry);
474 if (!ds->entries[next_entry].count)
475 ds->current_entry = next_entry;
477 spin_unlock_irqrestore(&ds->lock, flags);
482 static int __init dm_bio_prison_init(void)
484 _cell_cache = KMEM_CACHE(dm_bio_prison_cell, 0);
491 static void __exit dm_bio_prison_exit(void)
493 kmem_cache_destroy(_cell_cache);
497 /*----------------------------------------------------------------*/
502 static void build_data_key(struct dm_thin_device *td,
503 dm_block_t b, struct dm_cell_key *key)
506 key->dev = dm_thin_dev_id(td);
510 static void build_virtual_key(struct dm_thin_device *td, dm_block_t b,
511 struct dm_cell_key *key)
514 key->dev = dm_thin_dev_id(td);
518 /*----------------------------------------------------------------*/
521 * A pool device ties together a metadata device and a data device. It
522 * also provides the interface for creating and destroying internal devices.
525 struct dm_thin_new_mapping;
528 * The pool runs in 3 modes, ordered by increasing degradation so the modes can be compared.
531 PM_WRITE, /* metadata may be changed */
532 PM_READ_ONLY, /* metadata may not be changed */
533 PM_FAIL, /* all I/O fails */
536 struct pool_features {
539 bool zero_new_blocks:1;
540 bool discard_enabled:1;
541 bool discard_passdown:1;
545 typedef void (*process_bio_fn)(struct thin_c *tc, struct bio *bio);
546 typedef void (*process_mapping_fn)(struct dm_thin_new_mapping *m);
549 struct list_head list;
550 struct dm_target *ti; /* Only set if a pool target is bound */
552 struct mapped_device *pool_md;
553 struct block_device *md_dev;
554 struct dm_pool_metadata *pmd;
556 dm_block_t low_water_blocks;
557 uint32_t sectors_per_block;
558 int sectors_per_block_shift;
560 struct pool_features pf;
561 unsigned low_water_triggered:1; /* A dm event has been sent */
562 unsigned no_free_space:1; /* A -ENOSPC warning has been issued */
564 struct dm_bio_prison *prison;
565 struct dm_kcopyd_client *copier;
567 struct workqueue_struct *wq;
568 struct work_struct worker;
569 struct delayed_work waker;
571 unsigned long last_commit_jiffies;
575 struct bio_list deferred_bios;
576 struct bio_list deferred_flush_bios;
577 struct list_head prepared_mappings;
578 struct list_head prepared_discards;
580 struct bio_list retry_on_resume_list;
582 struct dm_deferred_set *shared_read_ds;
583 struct dm_deferred_set *all_io_ds;
585 struct dm_thin_new_mapping *next_mapping;
586 mempool_t *mapping_pool;
587 mempool_t *endio_hook_pool;
589 process_bio_fn process_bio;
590 process_bio_fn process_discard;
592 process_mapping_fn process_prepared_mapping;
593 process_mapping_fn process_prepared_discard;
596 static enum pool_mode get_pool_mode(struct pool *pool);
597 static void set_pool_mode(struct pool *pool, enum pool_mode mode);
600 * Target context for a pool.
603 struct dm_target *ti;
605 struct dm_dev *data_dev;
606 struct dm_dev *metadata_dev;
607 struct dm_target_callbacks callbacks;
609 dm_block_t low_water_blocks;
610 struct pool_features requested_pf; /* Features requested during table load */
611 struct pool_features adjusted_pf; /* Features used after adjusting for constituent devices */
615 * Target context for a thin.
618 struct dm_dev *pool_dev;
619 struct dm_dev *origin_dev;
623 struct dm_thin_device *td;
626 /*----------------------------------------------------------------*/
629 * A global list of pools that uses a struct mapped_device as a key.
631 static struct dm_thin_pool_table {
633 struct list_head pools;
634 } dm_thin_pool_table;
636 static void pool_table_init(void)
638 mutex_init(&dm_thin_pool_table.mutex);
639 INIT_LIST_HEAD(&dm_thin_pool_table.pools);
642 static void __pool_table_insert(struct pool *pool)
644 BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));
645 list_add(&pool->list, &dm_thin_pool_table.pools);
648 static void __pool_table_remove(struct pool *pool)
650 BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));
651 list_del(&pool->list);
654 static struct pool *__pool_table_lookup(struct mapped_device *md)
656 struct pool *pool = NULL, *tmp;
658 BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));
660 list_for_each_entry(tmp, &dm_thin_pool_table.pools, list) {
661 if (tmp->pool_md == md) {
670 static struct pool *__pool_table_lookup_metadata_dev(struct block_device *md_dev)
672 struct pool *pool = NULL, *tmp;
674 BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));
676 list_for_each_entry(tmp, &dm_thin_pool_table.pools, list) {
677 if (tmp->md_dev == md_dev) {
686 /*----------------------------------------------------------------*/
688 struct dm_thin_endio_hook {
690 struct dm_deferred_entry *shared_read_entry;
691 struct dm_deferred_entry *all_io_entry;
692 struct dm_thin_new_mapping *overwrite_mapping;
695 static void __requeue_bio_list(struct thin_c *tc, struct bio_list *master)
698 struct bio_list bios;
700 bio_list_init(&bios);
701 bio_list_merge(&bios, master);
702 bio_list_init(master);
704 while ((bio = bio_list_pop(&bios))) {
705 struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr;
708 bio_endio(bio, DM_ENDIO_REQUEUE);
710 bio_list_add(master, bio);
714 static void requeue_io(struct thin_c *tc)
716 struct pool *pool = tc->pool;
719 spin_lock_irqsave(&pool->lock, flags);
720 __requeue_bio_list(tc, &pool->deferred_bios);
721 __requeue_bio_list(tc, &pool->retry_on_resume_list);
722 spin_unlock_irqrestore(&pool->lock, flags);
726 * This section of code contains the logic for processing a thin device's IO.
727 * Much of the code depends on pool object resources (lists, workqueues, etc.)
728 * but most is exclusively called from the thin target rather than the thin-pool target.
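 *
 * Worked example of the block arithmetic used by get_bio_block() and
 * remap() below, assuming a 64KiB (128-sector) block size, which is a
 * power of two so sectors_per_block_shift == 7, and a hypothetical
 * mapping of virtual block 2 to data block 5:
 *
 *	bio->bi_sector  == 300
 *	virtual block   == 300 >> 7        == 2
 *	offset          == 300 & (128 - 1) == 44
 *	remapped sector == (5 << 7) | 44   == 684 on the pool's data device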
732 static dm_block_t get_bio_block(struct thin_c *tc, struct bio *bio)
734 sector_t block_nr = bio->bi_sector;
736 if (tc->pool->sectors_per_block_shift < 0)
737 (void) sector_div(block_nr, tc->pool->sectors_per_block);
738 else
739 block_nr >>= tc->pool->sectors_per_block_shift;
744 static void remap(struct thin_c *tc, struct bio *bio, dm_block_t block)
746 struct pool *pool = tc->pool;
747 sector_t bi_sector = bio->bi_sector;
749 bio->bi_bdev = tc->pool_dev->bdev;
750 if (tc->pool->sectors_per_block_shift < 0)
751 bio->bi_sector = (block * pool->sectors_per_block) +
752 sector_div(bi_sector, pool->sectors_per_block);
753 else
754 bio->bi_sector = (block << pool->sectors_per_block_shift) |
755 (bi_sector & (pool->sectors_per_block - 1));
758 static void remap_to_origin(struct thin_c *tc, struct bio *bio)
760 bio->bi_bdev = tc->origin_dev->bdev;
763 static int bio_triggers_commit(struct thin_c *tc, struct bio *bio)
765 return (bio->bi_rw & (REQ_FLUSH | REQ_FUA)) &&
766 dm_thin_changed_this_transaction(tc->td);
769 static void issue(struct thin_c *tc, struct bio *bio)
771 struct pool *pool = tc->pool;
774 if (!bio_triggers_commit(tc, bio)) {
775 generic_make_request(bio);
780 * Complete bio with an error if earlier I/O caused changes to
781 * the metadata that can't be committed, e.g. due to I/O errors
782 * on the metadata device.
784 if (dm_thin_aborted_changes(tc->td)) {
790 * Batch together any bios that trigger commits and then issue a
791 * single commit for them in process_deferred_bios().
793 spin_lock_irqsave(&pool->lock, flags);
794 bio_list_add(&pool->deferred_flush_bios, bio);
795 spin_unlock_irqrestore(&pool->lock, flags);
798 static void remap_to_origin_and_issue(struct thin_c *tc, struct bio *bio)
800 remap_to_origin(tc, bio);
804 static void remap_and_issue(struct thin_c *tc, struct bio *bio,
807 remap(tc, bio, block);
812 * wake_worker() is used when new work is queued and when pool_resume is
813 * ready to continue deferred IO processing.
815 static void wake_worker(struct pool *pool)
817 queue_work(pool->wq, &pool->worker);
820 /*----------------------------------------------------------------*/
823 * Bio endio functions.
825 struct dm_thin_new_mapping {
826 struct list_head list;
830 unsigned pass_discard:1;
833 dm_block_t virt_block;
834 dm_block_t data_block;
835 struct dm_bio_prison_cell *cell, *cell2;
839 * If the bio covers the whole area of a block then we can avoid
840 * zeroing or copying. Instead this bio is hooked. The bio will
841 * still be in the cell, so care has to be taken to avoid issuing the bio twice.
845 bio_end_io_t *saved_bi_end_io;
848 static void __maybe_add_mapping(struct dm_thin_new_mapping *m)
850 struct pool *pool = m->tc->pool;
852 if (m->quiesced && m->prepared) {
853 list_add(&m->list, &pool->prepared_mappings);
858 static void copy_complete(int read_err, unsigned long write_err, void *context)
861 struct dm_thin_new_mapping *m = context;
862 struct pool *pool = m->tc->pool;
864 m->err = read_err || write_err ? -EIO : 0;
866 spin_lock_irqsave(&pool->lock, flags);
868 __maybe_add_mapping(m);
869 spin_unlock_irqrestore(&pool->lock, flags);
872 static void overwrite_endio(struct bio *bio, int err)
875 struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr;
876 struct dm_thin_new_mapping *m = h->overwrite_mapping;
877 struct pool *pool = m->tc->pool;
881 spin_lock_irqsave(&pool->lock, flags);
883 __maybe_add_mapping(m);
884 spin_unlock_irqrestore(&pool->lock, flags);
887 /*----------------------------------------------------------------*/
894 * Prepared mapping jobs.
898 * This sends the bios in the cell back to the deferred_bios list.
900 static void cell_defer(struct thin_c *tc, struct dm_bio_prison_cell *cell,
901 dm_block_t data_block)
903 struct pool *pool = tc->pool;
906 spin_lock_irqsave(&pool->lock, flags);
907 dm_cell_release(cell, &pool->deferred_bios);
908 spin_unlock_irqrestore(&pool->lock, flags);
914 * Same as cell_defer above, except it omits one particular detainee,
915 * a write bio that covers the block and has already been processed.
917 static void cell_defer_except(struct thin_c *tc, struct dm_bio_prison_cell *cell)
919 struct bio_list bios;
920 struct pool *pool = tc->pool;
923 bio_list_init(&bios);
925 spin_lock_irqsave(&pool->lock, flags);
926 dm_cell_release_no_holder(cell, &pool->deferred_bios);
927 spin_unlock_irqrestore(&pool->lock, flags);
932 static void process_prepared_mapping_fail(struct dm_thin_new_mapping *m)
935 m->bio->bi_end_io = m->saved_bi_end_io;
936 dm_cell_error(m->cell);
938 mempool_free(m, m->tc->pool->mapping_pool);
940 static void process_prepared_mapping(struct dm_thin_new_mapping *m)
942 struct thin_c *tc = m->tc;
948 bio->bi_end_io = m->saved_bi_end_io;
951 dm_cell_error(m->cell);
956 * Commit the prepared block into the mapping btree.
957 * Any I/O for this block arriving after this point will get
958 * remapped to it directly.
960 r = dm_thin_insert_block(tc->td, m->virt_block, m->data_block);
962 DMERR("dm_thin_insert_block() failed");
963 dm_cell_error(m->cell);
968 * Release any bios held while the block was being provisioned.
969 * If we are processing a write bio that completely covers the block,
970 * we have already processed it, so we can ignore it now when processing
971 * the bios in the cell.
974 cell_defer_except(tc, m->cell);
977 cell_defer(tc, m->cell, m->data_block);
981 mempool_free(m, tc->pool->mapping_pool);
984 static void process_prepared_discard_fail(struct dm_thin_new_mapping *m)
986 struct thin_c *tc = m->tc;
988 bio_io_error(m->bio);
989 cell_defer_except(tc, m->cell);
990 cell_defer_except(tc, m->cell2);
991 mempool_free(m, tc->pool->mapping_pool);
994 static void process_prepared_discard_passdown(struct dm_thin_new_mapping *m)
996 struct thin_c *tc = m->tc;
999 remap_and_issue(tc, m->bio, m->data_block);
1001 bio_endio(m->bio, 0);
1003 cell_defer_except(tc, m->cell);
1004 cell_defer_except(tc, m->cell2);
1005 mempool_free(m, tc->pool->mapping_pool);
1008 static void process_prepared_discard(struct dm_thin_new_mapping *m)
1011 struct thin_c *tc = m->tc;
1013 r = dm_thin_remove_block(tc->td, m->virt_block);
1014 if (r)
1015 DMERR("dm_thin_remove_block() failed");
1017 process_prepared_discard_passdown(m);
1020 static void process_prepared(struct pool *pool, struct list_head *head,
1021 process_mapping_fn *fn)
1023 unsigned long flags;
1024 struct list_head maps;
1025 struct dm_thin_new_mapping *m, *tmp;
1027 INIT_LIST_HEAD(&maps);
1028 spin_lock_irqsave(&pool->lock, flags);
1029 list_splice_init(head, &maps);
1030 spin_unlock_irqrestore(&pool->lock, flags);
1032 list_for_each_entry_safe(m, tmp, &maps, list)
1037 * Deferred bio jobs.
1039 static int io_overlaps_block(struct pool *pool, struct bio *bio)
1041 return bio->bi_size == (pool->sectors_per_block << SECTOR_SHIFT);
1044 static int io_overwrites_block(struct pool *pool, struct bio *bio)
1046 return (bio_data_dir(bio) == WRITE) &&
1047 io_overlaps_block(pool, bio);
1050 static void save_and_set_endio(struct bio *bio, bio_end_io_t **save,
1053 *save = bio->bi_end_io;
1054 bio->bi_end_io = fn;
1057 static int ensure_next_mapping(struct pool *pool)
1059 if (pool->next_mapping)
1060 return 0;
1062 pool->next_mapping = mempool_alloc(pool->mapping_pool, GFP_ATOMIC);
1064 return pool->next_mapping ? 0 : -ENOMEM;
1067 static struct dm_thin_new_mapping *get_next_mapping(struct pool *pool)
1069 struct dm_thin_new_mapping *r = pool->next_mapping;
1071 BUG_ON(!pool->next_mapping);
1073 pool->next_mapping = NULL;
1078 static void schedule_copy(struct thin_c *tc, dm_block_t virt_block,
1079 struct dm_dev *origin, dm_block_t data_origin,
1080 dm_block_t data_dest,
1081 struct dm_bio_prison_cell *cell, struct bio *bio)
1084 struct pool *pool = tc->pool;
1085 struct dm_thin_new_mapping *m = get_next_mapping(pool);
1087 INIT_LIST_HEAD(&m->list);
1091 m->virt_block = virt_block;
1092 m->data_block = data_dest;
1097 if (!dm_deferred_set_add_work(pool->shared_read_ds, &m->list))
1101 * IO to pool_dev remaps to the pool target's data_dev.
1103 * If the whole block of data is being overwritten, we can issue the
1104 * bio immediately. Otherwise we use kcopyd to clone the data first.
1106 if (io_overwrites_block(pool, bio)) {
1107 struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr;
1109 h->overwrite_mapping = m;
1111 save_and_set_endio(bio, &m->saved_bi_end_io, overwrite_endio);
1112 remap_and_issue(tc, bio, data_dest);
1114 struct dm_io_region from, to;
1116 from.bdev = origin->bdev;
1117 from.sector = data_origin * pool->sectors_per_block;
1118 from.count = pool->sectors_per_block;
1120 to.bdev = tc->pool_dev->bdev;
1121 to.sector = data_dest * pool->sectors_per_block;
1122 to.count = pool->sectors_per_block;
1124 r = dm_kcopyd_copy(pool->copier, &from, 1, &to,
1125 0, copy_complete, m);
1127 mempool_free(m, pool->mapping_pool);
1128 DMERR("dm_kcopyd_copy() failed");
1129 dm_cell_error(cell);
1134 static void schedule_internal_copy(struct thin_c *tc, dm_block_t virt_block,
1135 dm_block_t data_origin, dm_block_t data_dest,
1136 struct dm_bio_prison_cell *cell, struct bio *bio)
1138 schedule_copy(tc, virt_block, tc->pool_dev,
1139 data_origin, data_dest, cell, bio);
1142 static void schedule_external_copy(struct thin_c *tc, dm_block_t virt_block,
1143 dm_block_t data_dest,
1144 struct dm_bio_prison_cell *cell, struct bio *bio)
1146 schedule_copy(tc, virt_block, tc->origin_dev,
1147 virt_block, data_dest, cell, bio);
1150 static void schedule_zero(struct thin_c *tc, dm_block_t virt_block,
1151 dm_block_t data_block, struct dm_bio_prison_cell *cell,
1154 struct pool *pool = tc->pool;
1155 struct dm_thin_new_mapping *m = get_next_mapping(pool);
1157 INIT_LIST_HEAD(&m->list);
1161 m->virt_block = virt_block;
1162 m->data_block = data_block;
1168 * If the whole block of data is being overwritten or we are not
1169 * zeroing pre-existing data, we can issue the bio immediately.
1170 * Otherwise we use kcopyd to zero the data first.
1172 if (!pool->pf.zero_new_blocks)
1173 process_prepared_mapping(m);
1175 else if (io_overwrites_block(pool, bio)) {
1176 struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr;
1178 h->overwrite_mapping = m;
1180 save_and_set_endio(bio, &m->saved_bi_end_io, overwrite_endio);
1181 remap_and_issue(tc, bio, data_block);
1184 struct dm_io_region to;
1186 to.bdev = tc->pool_dev->bdev;
1187 to.sector = data_block * pool->sectors_per_block;
1188 to.count = pool->sectors_per_block;
1190 r = dm_kcopyd_zero(pool->copier, 1, &to, 0, copy_complete, m);
1192 mempool_free(m, pool->mapping_pool);
1193 DMERR("dm_kcopyd_zero() failed");
1194 dm_cell_error(cell);
1199 static int commit(struct pool *pool)
1203 r = dm_pool_commit_metadata(pool->pmd);
1204 if (r)
1205 DMERR("commit failed, error = %d", r);
1211 * A non-zero return indicates read_only or fail_io mode.
1212 * Many callers don't care about the return value.
1214 static int commit_or_fallback(struct pool *pool)
1218 if (get_pool_mode(pool) != PM_WRITE)
1219 return -EINVAL;
1221 r = commit(pool);
1222 if (r)
1223 set_pool_mode(pool, PM_READ_ONLY);
1228 static int alloc_data_block(struct thin_c *tc, dm_block_t *result)
1231 dm_block_t free_blocks;
1232 unsigned long flags;
1233 struct pool *pool = tc->pool;
1235 r = dm_pool_get_free_block_count(pool->pmd, &free_blocks);
1239 if (free_blocks <= pool->low_water_blocks && !pool->low_water_triggered) {
1240 DMWARN("%s: reached low water mark, sending event.",
1241 dm_device_name(pool->pool_md));
1242 spin_lock_irqsave(&pool->lock, flags);
1243 pool->low_water_triggered = 1;
1244 spin_unlock_irqrestore(&pool->lock, flags);
1245 dm_table_event(pool->ti->table);
1249 if (pool->no_free_space)
1253 * Try to commit to see if that will free up some metadata space.
1256 (void) commit_or_fallback(pool);
1258 r = dm_pool_get_free_block_count(pool->pmd, &free_blocks);
1263 * If we still have no space we set a flag to avoid
1264 * doing all this checking and return -ENOSPC.
1267 DMWARN("%s: no free space available.",
1268 dm_device_name(pool->pool_md));
1269 spin_lock_irqsave(&pool->lock, flags);
1270 pool->no_free_space = 1;
1271 spin_unlock_irqrestore(&pool->lock, flags);
1277 r = dm_pool_alloc_data_block(pool->pmd, result);
1285 * If we have run out of space, queue bios until the device is
1286 * resumed, presumably after having been reloaded with more space.
1288 static void retry_on_resume(struct bio *bio)
1290 struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr;
1291 struct thin_c *tc = h->tc;
1292 struct pool *pool = tc->pool;
1293 unsigned long flags;
1295 spin_lock_irqsave(&pool->lock, flags);
1296 bio_list_add(&pool->retry_on_resume_list, bio);
1297 spin_unlock_irqrestore(&pool->lock, flags);
1300 static void no_space(struct dm_bio_prison_cell *cell)
1303 struct bio_list bios;
1305 bio_list_init(&bios);
1306 dm_cell_release(cell, &bios);
1308 while ((bio = bio_list_pop(&bios)))
1309 retry_on_resume(bio);
1312 static void process_discard(struct thin_c *tc, struct bio *bio)
1315 unsigned long flags;
1316 struct pool *pool = tc->pool;
1317 struct dm_bio_prison_cell *cell, *cell2;
1318 struct dm_cell_key key, key2;
1319 dm_block_t block = get_bio_block(tc, bio);
1320 struct dm_thin_lookup_result lookup_result;
1321 struct dm_thin_new_mapping *m;
1323 build_virtual_key(tc->td, block, &key);
1324 if (dm_bio_detain(tc->pool->prison, &key, bio, &cell))
1327 r = dm_thin_find_block(tc->td, block, 1, &lookup_result);
1331 * Check nobody is fiddling with this pool block. This can
1332 * happen if someone's in the process of breaking sharing of this block.
1335 build_data_key(tc->td, lookup_result.block, &key2);
1336 if (dm_bio_detain(tc->pool->prison, &key2, bio, &cell2)) {
1337 dm_cell_release_singleton(cell, bio);
1341 if (io_overlaps_block(pool, bio)) {
1343 * IO may still be going to the destination block. We must
1344 * quiesce before we can do the removal.
1346 m = get_next_mapping(pool);
1348 m->pass_discard = (!lookup_result.shared) && pool->pf.discard_passdown;
1349 m->virt_block = block;
1350 m->data_block = lookup_result.block;
1356 if (!dm_deferred_set_add_work(pool->all_io_ds, &m->list)) {
1357 spin_lock_irqsave(&pool->lock, flags);
1358 list_add(&m->list, &pool->prepared_discards);
1359 spin_unlock_irqrestore(&pool->lock, flags);
1364 * The DM core makes sure that the discard doesn't span
1365 * a block boundary. So we submit the discard of a
1366 * partial block appropriately.
1368 dm_cell_release_singleton(cell, bio);
1369 dm_cell_release_singleton(cell2, bio);
1370 if ((!lookup_result.shared) && pool->pf.discard_passdown)
1371 remap_and_issue(tc, bio, lookup_result.block);
1379 * It isn't provisioned, just forget it.
1381 dm_cell_release_singleton(cell, bio);
1386 DMERR("discard: find block unexpectedly returned %d", r);
1387 dm_cell_release_singleton(cell, bio);
1393 static void break_sharing(struct thin_c *tc, struct bio *bio, dm_block_t block,
1394 struct dm_cell_key *key,
1395 struct dm_thin_lookup_result *lookup_result,
1396 struct dm_bio_prison_cell *cell)
1399 dm_block_t data_block;
1401 r = alloc_data_block(tc, &data_block);
1404 schedule_internal_copy(tc, block, lookup_result->block,
1405 data_block, cell, bio);
1413 DMERR("%s: alloc_data_block() failed, error = %d", __func__, r);
1414 dm_cell_error(cell);
1419 static void process_shared_bio(struct thin_c *tc, struct bio *bio,
1421 struct dm_thin_lookup_result *lookup_result)
1423 struct dm_bio_prison_cell *cell;
1424 struct pool *pool = tc->pool;
1425 struct dm_cell_key key;
1428 * If cell is already occupied, then sharing is already in the process
1429 * of being broken so we have nothing further to do here.
1431 build_data_key(tc->td, lookup_result->block, &key);
1432 if (dm_bio_detain(pool->prison, &key, bio, &cell))
1435 if (bio_data_dir(bio) == WRITE && bio->bi_size)
1436 break_sharing(tc, bio, block, &key, lookup_result, cell);
1438 struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr;
1440 h->shared_read_entry = dm_deferred_entry_inc(pool->shared_read_ds);
1442 dm_cell_release_singleton(cell, bio);
1443 remap_and_issue(tc, bio, lookup_result->block);
1447 static void provision_block(struct thin_c *tc, struct bio *bio, dm_block_t block,
1448 struct dm_bio_prison_cell *cell)
1451 dm_block_t data_block;
1454 * Remap empty bios (flushes) immediately, without provisioning.
1456 if (!bio->bi_size) {
1457 dm_cell_release_singleton(cell, bio);
1458 remap_and_issue(tc, bio, 0);
1463 * Fill read bios with zeroes and complete them immediately.
1465 if (bio_data_dir(bio) == READ) {
1467 dm_cell_release_singleton(cell, bio);
1472 r = alloc_data_block(tc, &data_block);
1476 schedule_external_copy(tc, block, data_block, cell, bio);
1478 schedule_zero(tc, block, data_block, cell, bio);
1486 DMERR("%s: alloc_data_block() failed, error = %d", __func__, r);
1487 set_pool_mode(tc->pool, PM_READ_ONLY);
1488 dm_cell_error(cell);
1493 static void process_bio(struct thin_c *tc, struct bio *bio)
1496 dm_block_t block = get_bio_block(tc, bio);
1497 struct dm_bio_prison_cell *cell;
1498 struct dm_cell_key key;
1499 struct dm_thin_lookup_result lookup_result;
1502 * If cell is already occupied, then the block is already
1503 * being provisioned so we have nothing further to do here.
1505 build_virtual_key(tc->td, block, &key);
1506 if (dm_bio_detain(tc->pool->prison, &key, bio, &cell))
1509 r = dm_thin_find_block(tc->td, block, 1, &lookup_result);
1513 * We can release this cell now. This thread is the only
1514 * one that puts bios into a cell, and we know there were
1515 * no preceding bios.
1518 * TODO: this will probably have to change when discard goes back in.
1521 dm_cell_release_singleton(cell, bio);
1523 if (lookup_result.shared)
1524 process_shared_bio(tc, bio, block, &lookup_result);
1526 remap_and_issue(tc, bio, lookup_result.block);
1530 if (bio_data_dir(bio) == READ && tc->origin_dev) {
1531 dm_cell_release_singleton(cell, bio);
1532 remap_to_origin_and_issue(tc, bio);
1534 provision_block(tc, bio, block, cell);
1538 DMERR("dm_thin_find_block() failed, error = %d", r);
1539 dm_cell_release_singleton(cell, bio);
1545 static void process_bio_read_only(struct thin_c *tc, struct bio *bio)
1548 int rw = bio_data_dir(bio);
1549 dm_block_t block = get_bio_block(tc, bio);
1550 struct dm_thin_lookup_result lookup_result;
1552 r = dm_thin_find_block(tc->td, block, 1, &lookup_result);
1555 if (lookup_result.shared && (rw == WRITE) && bio->bi_size)
1558 remap_and_issue(tc, bio, lookup_result.block);
1567 if (tc->origin_dev) {
1568 remap_to_origin_and_issue(tc, bio);
1577 DMERR("dm_thin_find_block() failed, error = %d", r);
1583 static void process_bio_fail(struct thin_c *tc, struct bio *bio)
1588 static int need_commit_due_to_time(struct pool *pool)
1590 return jiffies < pool->last_commit_jiffies ||
1591 jiffies > pool->last_commit_jiffies + COMMIT_PERIOD;
1594 static void process_deferred_bios(struct pool *pool)
1596 unsigned long flags;
1598 struct bio_list bios;
1600 bio_list_init(&bios);
1602 spin_lock_irqsave(&pool->lock, flags);
1603 bio_list_merge(&bios, &pool->deferred_bios);
1604 bio_list_init(&pool->deferred_bios);
1605 spin_unlock_irqrestore(&pool->lock, flags);
1607 while ((bio = bio_list_pop(&bios))) {
1608 struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr;
1609 struct thin_c *tc = h->tc;
1612 * If we've got no free new_mapping structs, and processing
1613 * this bio might require one, we pause until there are some
1614 * prepared mappings to process.
1616 if (ensure_next_mapping(pool)) {
1617 spin_lock_irqsave(&pool->lock, flags);
1618 bio_list_merge(&pool->deferred_bios, &bios);
1619 spin_unlock_irqrestore(&pool->lock, flags);
1624 if (bio->bi_rw & REQ_DISCARD)
1625 pool->process_discard(tc, bio);
1627 pool->process_bio(tc, bio);
1631 * If there are any deferred flush bios, we must commit
1632 * the metadata before issuing them.
1634 bio_list_init(&bios);
1635 spin_lock_irqsave(&pool->lock, flags);
1636 bio_list_merge(&bios, &pool->deferred_flush_bios);
1637 bio_list_init(&pool->deferred_flush_bios);
1638 spin_unlock_irqrestore(&pool->lock, flags);
1640 if (bio_list_empty(&bios) && !need_commit_due_to_time(pool))
1643 if (commit_or_fallback(pool)) {
1644 while ((bio = bio_list_pop(&bios)))
1648 pool->last_commit_jiffies = jiffies;
1650 while ((bio = bio_list_pop(&bios)))
1651 generic_make_request(bio);
1654 static void do_worker(struct work_struct *ws)
1656 struct pool *pool = container_of(ws, struct pool, worker);
1658 process_prepared(pool, &pool->prepared_mappings, &pool->process_prepared_mapping);
1659 process_prepared(pool, &pool->prepared_discards, &pool->process_prepared_discard);
1660 process_deferred_bios(pool);
1664 * We want to commit periodically so that not too much
1665 * unwritten data builds up.
1667 static void do_waker(struct work_struct *ws)
1669 struct pool *pool = container_of(to_delayed_work(ws), struct pool, waker);
1671 queue_delayed_work(pool->wq, &pool->waker, COMMIT_PERIOD);
1674 /*----------------------------------------------------------------*/
1676 static enum pool_mode get_pool_mode(struct pool *pool)
1678 return pool->pf.mode;
1681 static void set_pool_mode(struct pool *pool, enum pool_mode mode)
1685 pool->pf.mode = mode;
1689 DMERR("switching pool to failure mode");
1690 pool->process_bio = process_bio_fail;
1691 pool->process_discard = process_bio_fail;
1692 pool->process_prepared_mapping = process_prepared_mapping_fail;
1693 pool->process_prepared_discard = process_prepared_discard_fail;
1697 DMERR("switching pool to read-only mode");
1698 r = dm_pool_abort_metadata(pool->pmd);
1700 DMERR("aborting transaction failed");
1701 set_pool_mode(pool, PM_FAIL);
1703 dm_pool_metadata_read_only(pool->pmd);
1704 pool->process_bio = process_bio_read_only;
1705 pool->process_discard = process_discard;
1706 pool->process_prepared_mapping = process_prepared_mapping_fail;
1707 pool->process_prepared_discard = process_prepared_discard_passdown;
1712 pool->process_bio = process_bio;
1713 pool->process_discard = process_discard;
1714 pool->process_prepared_mapping = process_prepared_mapping;
1715 pool->process_prepared_discard = process_prepared_discard;
1720 /*----------------------------------------------------------------*/
1723 * Mapping functions.
1727 * Called only while mapping a thin bio to hand it over to the workqueue.
1729 static void thin_defer_bio(struct thin_c *tc, struct bio *bio)
1731 unsigned long flags;
1732 struct pool *pool = tc->pool;
1734 spin_lock_irqsave(&pool->lock, flags);
1735 bio_list_add(&pool->deferred_bios, bio);
1736 spin_unlock_irqrestore(&pool->lock, flags);
1741 static struct dm_thin_endio_hook *thin_hook_bio(struct thin_c *tc, struct bio *bio)
1743 struct pool *pool = tc->pool;
1744 struct dm_thin_endio_hook *h = mempool_alloc(pool->endio_hook_pool, GFP_NOIO);
1747 h->shared_read_entry = NULL;
1748 h->all_io_entry = bio->bi_rw & REQ_DISCARD ? NULL : dm_deferred_entry_inc(pool->all_io_ds);
1749 h->overwrite_mapping = NULL;
1755 * Non-blocking function called from the thin target's map function.
1757 static int thin_bio_map(struct dm_target *ti, struct bio *bio,
1758 union map_info *map_context)
1761 struct thin_c *tc = ti->private;
1762 dm_block_t block = get_bio_block(tc, bio);
1763 struct dm_thin_device *td = tc->td;
1764 struct dm_thin_lookup_result result;
1766 map_context->ptr = thin_hook_bio(tc, bio);
1768 if (get_pool_mode(tc->pool) == PM_FAIL) {
1770 return DM_MAPIO_SUBMITTED;
1773 if (bio->bi_rw & (REQ_DISCARD | REQ_FLUSH | REQ_FUA)) {
1774 thin_defer_bio(tc, bio);
1775 return DM_MAPIO_SUBMITTED;
1778 r = dm_thin_find_block(td, block, 0, &result);
1781 * Note that we defer readahead too.
1785 if (unlikely(result.shared)) {
1787 * We have a race condition here between the
1788 * result.shared value returned by the lookup and
1789 * snapshot creation, which may cause new sharing.
1792 * To avoid this always quiesce the origin before
1793 * taking the snap. You want to do this anyway to
1794 * ensure a consistent application view (i.e. lockfs).
1797 * More distant ancestors are irrelevant. The
1798 * shared flag will be set in their case.
1800 thin_defer_bio(tc, bio);
1801 r = DM_MAPIO_SUBMITTED;
1803 remap(tc, bio, result.block);
1804 r = DM_MAPIO_REMAPPED;
1809 if (get_pool_mode(tc->pool) == PM_READ_ONLY) {
1811 * This block isn't provisioned, and we have no way
1812 * of doing so. Just error it.
1815 r = DM_MAPIO_SUBMITTED;
1822 * In future, the failed dm_thin_find_block above could
1823 * provide the hint to load the metadata into cache.
1825 thin_defer_bio(tc, bio);
1826 r = DM_MAPIO_SUBMITTED;
1831 * Must always call bio_io_error on failure.
1832 * dm_thin_find_block can fail with -EINVAL if the
1833 * pool is switched to fail-io mode.
1836 r = DM_MAPIO_SUBMITTED;
1843 static int pool_is_congested(struct dm_target_callbacks *cb, int bdi_bits)
1846 unsigned long flags;
1847 struct pool_c *pt = container_of(cb, struct pool_c, callbacks);
1849 spin_lock_irqsave(&pt->pool->lock, flags);
1850 r = !bio_list_empty(&pt->pool->retry_on_resume_list);
1851 spin_unlock_irqrestore(&pt->pool->lock, flags);
1854 struct request_queue *q = bdev_get_queue(pt->data_dev->bdev);
1855 r = bdi_congested(&q->backing_dev_info, bdi_bits);
1861 static void __requeue_bios(struct pool *pool)
1863 bio_list_merge(&pool->deferred_bios, &pool->retry_on_resume_list);
1864 bio_list_init(&pool->retry_on_resume_list);
1867 /*----------------------------------------------------------------
1868 * Binding of control targets to a pool object
1869 *--------------------------------------------------------------*/
1870 static bool data_dev_supports_discard(struct pool_c *pt)
1872 struct request_queue *q = bdev_get_queue(pt->data_dev->bdev);
1874 return q && blk_queue_discard(q);
1878 * If discard_passdown was enabled verify that the data device
1879 * supports discards. Disable discard_passdown if not.
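 *
 * For example (numbers purely illustrative): with a 64KiB pool block
 * (128 sectors), a data device that advertises max_discard_sectors of
 * 64 would trip the "max discard sectors smaller than a block" check,
 * and one advertising a discard_granularity of 1MiB would trip the
 * "discard granularity larger than a block" check.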
1881 static void disable_passdown_if_not_supported(struct pool_c *pt)
1883 struct pool *pool = pt->pool;
1884 struct block_device *data_bdev = pt->data_dev->bdev;
1885 struct queue_limits *data_limits = &bdev_get_queue(data_bdev)->limits;
1886 sector_t block_size = pool->sectors_per_block << SECTOR_SHIFT;
1887 const char *reason = NULL;
1888 char buf[BDEVNAME_SIZE];
1890 if (!pt->adjusted_pf.discard_passdown)
1893 if (!data_dev_supports_discard(pt))
1894 reason = "discard unsupported";
1896 else if (data_limits->max_discard_sectors < pool->sectors_per_block)
1897 reason = "max discard sectors smaller than a block";
1899 else if (data_limits->discard_granularity > block_size)
1900 reason = "discard granularity larger than a block";
1902 else if (block_size & (data_limits->discard_granularity - 1))
1903 reason = "discard granularity not a factor of block size";
1906 DMWARN("Data device (%s) %s: Disabling discard passdown.", bdevname(data_bdev, buf), reason);
1907 pt->adjusted_pf.discard_passdown = false;
1911 static int bind_control_target(struct pool *pool, struct dm_target *ti)
1913 struct pool_c *pt = ti->private;
1916 * We want to make sure that degraded pools are never upgraded.
1918 enum pool_mode old_mode = pool->pf.mode;
1919 enum pool_mode new_mode = pt->adjusted_pf.mode;
1921 if (old_mode > new_mode)
1922 new_mode = old_mode;
1925 pool->low_water_blocks = pt->low_water_blocks;
1926 pool->pf = pt->adjusted_pf;
1928 set_pool_mode(pool, new_mode);
1933 static void unbind_control_target(struct pool *pool, struct dm_target *ti)
1939 /*----------------------------------------------------------------
1941 *--------------------------------------------------------------*/
1942 /* Initialize pool features. */
1943 static void pool_features_init(struct pool_features *pf)
1945 pf->mode = PM_WRITE;
1946 pf->zero_new_blocks = true;
1947 pf->discard_enabled = true;
1948 pf->discard_passdown = true;
1951 static void __pool_destroy(struct pool *pool)
1953 __pool_table_remove(pool);
1955 if (dm_pool_metadata_close(pool->pmd) < 0)
1956 DMWARN("%s: dm_pool_metadata_close() failed.", __func__);
1958 dm_bio_prison_destroy(pool->prison);
1959 dm_kcopyd_client_destroy(pool->copier);
1962 destroy_workqueue(pool->wq);
1964 if (pool->next_mapping)
1965 mempool_free(pool->next_mapping, pool->mapping_pool);
1966 mempool_destroy(pool->mapping_pool);
1967 mempool_destroy(pool->endio_hook_pool);
1968 dm_deferred_set_destroy(pool->shared_read_ds);
1969 dm_deferred_set_destroy(pool->all_io_ds);
1973 static struct kmem_cache *_new_mapping_cache;
1974 static struct kmem_cache *_endio_hook_cache;
1976 static struct pool *pool_create(struct mapped_device *pool_md,
1977 struct block_device *metadata_dev,
1978 unsigned long block_size,
1979 int read_only, char **error)
1984 struct dm_pool_metadata *pmd;
1985 bool format_device = read_only ? false : true;
1987 pmd = dm_pool_metadata_open(metadata_dev, block_size, format_device);
1989 *error = "Error creating metadata object";
1990 return (struct pool *)pmd;
1993 pool = kmalloc(sizeof(*pool), GFP_KERNEL);
1995 *error = "Error allocating memory for pool";
1996 err_p = ERR_PTR(-ENOMEM);
2001 pool->sectors_per_block = block_size;
2002 if (block_size & (block_size - 1))
2003 pool->sectors_per_block_shift = -1;
2004 else
2005 pool->sectors_per_block_shift = __ffs(block_size);
2006 pool->low_water_blocks = 0;
2007 pool_features_init(&pool->pf);
2008 pool->prison = dm_bio_prison_create(PRISON_CELLS);
2009 if (!pool->prison) {
2010 *error = "Error creating pool's bio prison";
2011 err_p = ERR_PTR(-ENOMEM);
2015 pool->copier = dm_kcopyd_client_create();
2016 if (IS_ERR(pool->copier)) {
2017 r = PTR_ERR(pool->copier);
2018 *error = "Error creating pool's kcopyd client";
2020 goto bad_kcopyd_client;
2024 * Create singlethreaded workqueue that will service all devices
2025 * that use this metadata.
2027 pool->wq = alloc_ordered_workqueue("dm-" DM_MSG_PREFIX, WQ_MEM_RECLAIM);
2029 *error = "Error creating pool's workqueue";
2030 err_p = ERR_PTR(-ENOMEM);
2034 INIT_WORK(&pool->worker, do_worker);
2035 INIT_DELAYED_WORK(&pool->waker, do_waker);
2036 spin_lock_init(&pool->lock);
2037 bio_list_init(&pool->deferred_bios);
2038 bio_list_init(&pool->deferred_flush_bios);
2039 INIT_LIST_HEAD(&pool->prepared_mappings);
2040 INIT_LIST_HEAD(&pool->prepared_discards);
2041 pool->low_water_triggered = 0;
2042 pool->no_free_space = 0;
2043 bio_list_init(&pool->retry_on_resume_list);
2045 pool->shared_read_ds = dm_deferred_set_create();
2046 if (!pool->shared_read_ds) {
2047 *error = "Error creating pool's shared read deferred set";
2048 err_p = ERR_PTR(-ENOMEM);
2049 goto bad_shared_read_ds;
2052 pool->all_io_ds = dm_deferred_set_create();
2053 if (!pool->all_io_ds) {
2054 *error = "Error creating pool's all io deferred set";
2055 err_p = ERR_PTR(-ENOMEM);
2059 pool->next_mapping = NULL;
2060 pool->mapping_pool = mempool_create_slab_pool(MAPPING_POOL_SIZE,
2061 _new_mapping_cache);
2062 if (!pool->mapping_pool) {
2063 *error = "Error creating pool's mapping mempool";
2064 err_p = ERR_PTR(-ENOMEM);
2065 goto bad_mapping_pool;
2068 pool->endio_hook_pool = mempool_create_slab_pool(ENDIO_HOOK_POOL_SIZE,
2070 if (!pool->endio_hook_pool) {
2071 *error = "Error creating pool's endio_hook mempool";
2072 err_p = ERR_PTR(-ENOMEM);
2073 goto bad_endio_hook_pool;
2075 pool->ref_count = 1;
2076 pool->last_commit_jiffies = jiffies;
2077 pool->pool_md = pool_md;
2078 pool->md_dev = metadata_dev;
2079 __pool_table_insert(pool);
2083 bad_endio_hook_pool:
2084 mempool_destroy(pool->mapping_pool);
2086 dm_deferred_set_destroy(pool->all_io_ds);
2088 dm_deferred_set_destroy(pool->shared_read_ds);
2090 destroy_workqueue(pool->wq);
2092 dm_kcopyd_client_destroy(pool->copier);
2094 dm_bio_prison_destroy(pool->prison);
2098 if (dm_pool_metadata_close(pmd))
2099 DMWARN("%s: dm_pool_metadata_close() failed.", __func__);
2104 static void __pool_inc(struct pool *pool)
2106 BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));
2110 static void __pool_dec(struct pool *pool)
2112 BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));
2113 BUG_ON(!pool->ref_count);
2114 if (!--pool->ref_count)
2115 __pool_destroy(pool);
2118 static struct pool *__pool_find(struct mapped_device *pool_md,
2119 struct block_device *metadata_dev,
2120 unsigned long block_size, int read_only,
2121 char **error, int *created)
2123 struct pool *pool = __pool_table_lookup_metadata_dev(metadata_dev);
2126 if (pool->pool_md != pool_md) {
2127 *error = "metadata device already in use by a pool";
2128 return ERR_PTR(-EBUSY);
2133 pool = __pool_table_lookup(pool_md);
2135 if (pool->md_dev != metadata_dev) {
2136 *error = "different pool cannot replace a pool";
2137 return ERR_PTR(-EINVAL);
2142 pool = pool_create(pool_md, metadata_dev, block_size, read_only, error);
2150 /*----------------------------------------------------------------
2151 * Pool target methods
2152 *--------------------------------------------------------------*/
2153 static void pool_dtr(struct dm_target *ti)
2155 struct pool_c *pt = ti->private;
2157 mutex_lock(&dm_thin_pool_table.mutex);
2159 unbind_control_target(pt->pool, ti);
2160 __pool_dec(pt->pool);
2161 dm_put_device(ti, pt->metadata_dev);
2162 dm_put_device(ti, pt->data_dev);
2165 mutex_unlock(&dm_thin_pool_table.mutex);
2168 static int parse_pool_features(struct dm_arg_set *as, struct pool_features *pf,
2169 struct dm_target *ti)
2173 const char *arg_name;
2175 static struct dm_arg _args[] = {
2176 {0, 3, "Invalid number of pool feature arguments"},
2180 * No feature arguments supplied.
2185 r = dm_read_arg_group(_args, as, &argc, &ti->error);
2189 while (argc && !r) {
2190 arg_name = dm_shift_arg(as);
2193 if (!strcasecmp(arg_name, "skip_block_zeroing"))
2194 pf->zero_new_blocks = false;
2196 else if (!strcasecmp(arg_name, "ignore_discard"))
2197 pf->discard_enabled = false;
2199 else if (!strcasecmp(arg_name, "no_discard_passdown"))
2200 pf->discard_passdown = false;
2202 else if (!strcasecmp(arg_name, "read_only"))
2203 pf->mode = PM_READ_ONLY;
2206 ti->error = "Unrecognised pool feature requested";
2216 * thin-pool <metadata dev> <data dev>
2217 * <data block size (sectors)>
2218 * <low water mark (blocks)>
2219 * [<#feature args> [<arg>]*]
2221 * Optional feature arguments are:
2222 * skip_block_zeroing: skips the zeroing of newly-provisioned blocks.
2223 * ignore_discard: disable discard
2224 * no_discard_passdown: don't pass discards down to the data device
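 *
 * parse_pool_features() below also accepts a read_only argument, which
 * puts the pool into read-only mode (PM_READ_ONLY).
 *
 * Example of a pool table line, as a sketch only (device names and
 * sizes are hypothetical):
 *
 *	0 20971520 thin-pool /dev/sdb1 /dev/sdb2 128 16384 1 skip_block_zeroing
 *
 * i.e. a 10GiB pool with 64KiB (128-sector) data blocks, a low water
 * mark of 16384 blocks and one feature argument.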
2226 static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv)
2228 int r, pool_created = 0;
2231 struct pool_features pf;
2232 struct dm_arg_set as;
2233 struct dm_dev *data_dev;
2234 unsigned long block_size;
2235 dm_block_t low_water_blocks;
2236 struct dm_dev *metadata_dev;
2237 sector_t metadata_dev_size;
2238 char b[BDEVNAME_SIZE];
2241 * FIXME Remove validation from scope of lock.
2243 mutex_lock(&dm_thin_pool_table.mutex);
2246 ti->error = "Invalid argument count";
2253 r = dm_get_device(ti, argv[0], FMODE_READ | FMODE_WRITE, &metadata_dev);
2255 ti->error = "Error opening metadata block device";
2259 metadata_dev_size = i_size_read(metadata_dev->bdev->bd_inode) >> SECTOR_SHIFT;
2260 if (metadata_dev_size > THIN_METADATA_MAX_SECTORS_WARNING)
2261 DMWARN("Metadata device %s is larger than %u sectors: excess space will not be used.",
2262 bdevname(metadata_dev->bdev, b), THIN_METADATA_MAX_SECTORS);
2264 r = dm_get_device(ti, argv[1], FMODE_READ | FMODE_WRITE, &data_dev);
2266 ti->error = "Error getting data device";
2270 if (kstrtoul(argv[2], 10, &block_size) || !block_size ||
2271 block_size < DATA_DEV_BLOCK_SIZE_MIN_SECTORS ||
2272 block_size > DATA_DEV_BLOCK_SIZE_MAX_SECTORS ||
2273 block_size & (DATA_DEV_BLOCK_SIZE_MIN_SECTORS - 1)) {
2274 ti->error = "Invalid block size";
2279 if (kstrtoull(argv[3], 10, (unsigned long long *)&low_water_blocks)) {
2280 ti->error = "Invalid low water mark";
2286 * Set default pool features.
2288 pool_features_init(&pf);
2290 dm_consume_args(&as, 4);
2291 r = parse_pool_features(&as, &pf, ti);
2295 pt = kzalloc(sizeof(*pt), GFP_KERNEL);
2301 pool = __pool_find(dm_table_get_md(ti->table), metadata_dev->bdev,
2302 block_size, pf.mode == PM_READ_ONLY, &ti->error, &pool_created);
2309 * 'pool_created' reflects whether this is the first table load.
2310 * Top level discard support is not allowed to be changed after
2311 * initial load. This would require a pool reload to trigger thin device changes.
2314 if (!pool_created && pf.discard_enabled != pool->pf.discard_enabled) {
2315 ti->error = "Discard support cannot be disabled once enabled";
2317 goto out_flags_changed;
2322 pt->metadata_dev = metadata_dev;
2323 pt->data_dev = data_dev;
2324 pt->low_water_blocks = low_water_blocks;
2325 pt->adjusted_pf = pt->requested_pf = pf;
2326 ti->num_flush_requests = 1;
2329 * Only need to enable discards if the pool should pass
2330 * them down to the data device. The thin device's discard
2331 * processing will cause mappings to be removed from the btree.
2333 if (pf.discard_enabled && pf.discard_passdown) {
2334 ti->num_discard_requests = 1;
2337 * Setting 'discards_supported' circumvents the normal
2338 * stacking of discard limits (this keeps the pool and
2339 * thin devices' discard limits consistent).
2341 ti->discards_supported = true;
2342 ti->discard_zeroes_data_unsupported = true;
2346 pt->callbacks.congested_fn = pool_is_congested;
2347 dm_table_add_target_callbacks(ti->table, &pt->callbacks);
2349 mutex_unlock(&dm_thin_pool_table.mutex);
2358 dm_put_device(ti, data_dev);
2360 dm_put_device(ti, metadata_dev);
2362 mutex_unlock(&dm_thin_pool_table.mutex);
2367 static int pool_map(struct dm_target *ti, struct bio *bio,
2368 union map_info *map_context)
2371 struct pool_c *pt = ti->private;
2372 struct pool *pool = pt->pool;
2373 unsigned long flags;
2376 * As this is a singleton target, ti->begin is always zero.
2378 spin_lock_irqsave(&pool->lock, flags);
2379 bio->bi_bdev = pt->data_dev->bdev;
2380 r = DM_MAPIO_REMAPPED;
2381 spin_unlock_irqrestore(&pool->lock, flags);
2387 * Retrieves the number of blocks of the data device from
2388 * the superblock and compares it to the actual device size,
2389 * thus resizing the data device in case it has grown.
2391 * This both copes with opening preallocated data devices in the ctr
2392 * being followed by a resume, and with
2394 * calling the resume method individually after userspace has
2395 * grown the data device in reaction to a table event.
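 *
 * For example (sketch only): after userspace grows the data device, it
 * reloads the pool table with the new length and resumes; pool_preresume()
 * below then calls dm_pool_resize_data_dev() so the pool can use the
 * extra space.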
2397 static int pool_preresume(struct dm_target *ti)
2400 struct pool_c *pt = ti->private;
2401 struct pool *pool = pt->pool;
2402 sector_t data_size = ti->len;
2403 dm_block_t sb_data_size;
2406 * Take control of the pool object.
2408 r = bind_control_target(pool, ti);
2412 (void) sector_div(data_size, pool->sectors_per_block);
2414 r = dm_pool_get_data_dev_size(pool->pmd, &sb_data_size);
2416 DMERR("failed to retrieve data device size");
2420 if (data_size < sb_data_size) {
2421 DMERR("pool target too small, is %llu blocks (expected %llu)",
2422 (unsigned long long)data_size, (unsigned long long)sb_data_size);
2425 } else if (data_size > sb_data_size) {
2426 r = dm_pool_resize_data_dev(pool->pmd, data_size);
2428 DMERR("failed to resize data device");
2429 /* FIXME Stricter than necessary: Rollback transaction instead here */
2430 set_pool_mode(pool, PM_READ_ONLY);
2434 (void) commit_or_fallback(pool);
2440 static void pool_resume(struct dm_target *ti)
2442 struct pool_c *pt = ti->private;
2443 struct pool *pool = pt->pool;
2444 unsigned long flags;
2446 spin_lock_irqsave(&pool->lock, flags);
2447 pool->low_water_triggered = 0;
2448 pool->no_free_space = 0;
2449 __requeue_bios(pool);
2450 spin_unlock_irqrestore(&pool->lock, flags);
2452 do_waker(&pool->waker.work);
2455 static void pool_postsuspend(struct dm_target *ti)
2457 struct pool_c *pt = ti->private;
2458 struct pool *pool = pt->pool;
2460 cancel_delayed_work(&pool->waker);
2461 flush_workqueue(pool->wq);
2462 (void) commit_or_fallback(pool);
2465 static int check_arg_count(unsigned argc, unsigned args_required)
2467 if (argc != args_required) {
2468 DMWARN("Message received with %u arguments instead of %u.",
2469 argc, args_required);
2476 static int read_dev_id(char *arg, dm_thin_id *dev_id, int warning)
2478 if (!kstrtoull(arg, 10, (unsigned long long *)dev_id) &&
2479 *dev_id <= MAX_DEV_ID)
2483 DMWARN("Message received with invalid device id: %s", arg);
2488 static int process_create_thin_mesg(unsigned argc, char **argv, struct pool *pool)
2493 r = check_arg_count(argc, 2);
2497 r = read_dev_id(argv[1], &dev_id, 1);
2501 r = dm_pool_create_thin(pool->pmd, dev_id);
2503 DMWARN("Creation of new thinly-provisioned device with id %s failed.",
2511 static int process_create_snap_mesg(unsigned argc, char **argv, struct pool *pool)
2514 dm_thin_id origin_dev_id;
2517 r = check_arg_count(argc, 3);
2521 r = read_dev_id(argv[1], &dev_id, 1);
2525 r = read_dev_id(argv[2], &origin_dev_id, 1);
2529 r = dm_pool_create_snap(pool->pmd, dev_id, origin_dev_id);
2531 DMWARN("Creation of new snapshot %s of device %s failed.",
2539 static int process_delete_mesg(unsigned argc, char **argv, struct pool *pool)
2544 r = check_arg_count(argc, 2);
2548 r = read_dev_id(argv[1], &dev_id, 1);
2552 r = dm_pool_delete_thin_device(pool->pmd, dev_id);
2554 DMWARN("Deletion of thin device %s failed.", argv[1]);
2559 static int process_set_transaction_id_mesg(unsigned argc, char **argv, struct pool *pool)
2561 dm_thin_id old_id, new_id;
2564 r = check_arg_count(argc, 3);
2568 if (kstrtoull(argv[1], 10, (unsigned long long *)&old_id)) {
2569 DMWARN("set_transaction_id message: Unrecognised id %s.", argv[1]);
2573 if (kstrtoull(argv[2], 10, (unsigned long long *)&new_id)) {
2574 DMWARN("set_transaction_id message: Unrecognised new id %s.", argv[2]);
2578 r = dm_pool_set_metadata_transaction_id(pool->pmd, old_id, new_id);
2580 DMWARN("Failed to change transaction id from %s to %s.",
2588 static int process_reserve_metadata_snap_mesg(unsigned argc, char **argv, struct pool *pool)
2592 r = check_arg_count(argc, 1);
2596 (void) commit_or_fallback(pool);
2598 r = dm_pool_reserve_metadata_snap(pool->pmd);
2600 DMWARN("reserve_metadata_snap message failed.");
2605 static int process_release_metadata_snap_mesg(unsigned argc, char **argv, struct pool *pool)
2609 r = check_arg_count(argc, 1);
2613 r = dm_pool_release_metadata_snap(pool->pmd);
2615 DMWARN("release_metadata_snap message failed.");
2621 * Messages supported:
2622 * create_thin <dev_id>
2623 * create_snap <dev_id> <origin_id>
2624 * delete <dev_id>
2625 * trim <dev_id> <new_size_in_sectors> (listed here but not handled by pool_message() below)
2626 * set_transaction_id <current_trans_id> <new_trans_id>
2627 * reserve_metadata_snap
2628 * release_metadata_snap
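 *
 * Illustrative invocations from userspace (hypothetical pool device name):
 *
 *   dmsetup message /dev/mapper/pool 0 "create_thin 0"
 *   dmsetup message /dev/mapper/pool 0 "create_snap 1 0"
 *   dmsetup message /dev/mapper/pool 0 "delete 1"
 */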
2630 static int pool_message(struct dm_target *ti, unsigned argc, char **argv)
2633 struct pool_c *pt = ti->private;
2634 struct pool *pool = pt->pool;
2636 if (!strcasecmp(argv[0], "create_thin"))
2637 r = process_create_thin_mesg(argc, argv, pool);
2639 else if (!strcasecmp(argv[0], "create_snap"))
2640 r = process_create_snap_mesg(argc, argv, pool);
2642 else if (!strcasecmp(argv[0], "delete"))
2643 r = process_delete_mesg(argc, argv, pool);
2645 else if (!strcasecmp(argv[0], "set_transaction_id"))
2646 r = process_set_transaction_id_mesg(argc, argv, pool);
2648 else if (!strcasecmp(argv[0], "reserve_metadata_snap"))
2649 r = process_reserve_metadata_snap_mesg(argc, argv, pool);
2651 else if (!strcasecmp(argv[0], "release_metadata_snap"))
2652 r = process_release_metadata_snap_mesg(argc, argv, pool);
2655 DMWARN("Unrecognised thin pool target message received: %s", argv[0]);
2658 (void) commit_or_fallback(pool);
2663 static void emit_flags(struct pool_features *pf, char *result,
2664 unsigned sz, unsigned maxlen)
2666 unsigned count = !pf->zero_new_blocks + !pf->discard_enabled +
2667 !pf->discard_passdown + (pf->mode == PM_READ_ONLY);
2668 DMEMIT("%u ", count);
2670 if (!pf->zero_new_blocks)
2671 DMEMIT("skip_block_zeroing ");
2673 if (!pf->discard_enabled)
2674 DMEMIT("ignore_discard ");
2676 if (!pf->discard_passdown)
2677 DMEMIT("no_discard_passdown ");
2679 if (pf->mode == PM_READ_ONLY)
2680 DMEMIT("read_only ");
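	/*
	 * Example (illustrative): a pool loaded with the feature args
	 * "2 skip_block_zeroing ignore_discard" has count == 2 here and
	 * emits "2 skip_block_zeroing ignore_discard " for its requested
	 * features in the table status.
	 */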
2685 * <transaction id> <used metadata blocks>/<total metadata blocks>
2686 * <used data blocks>/<total data blocks> <held metadata root>
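 *
 * Example INFO line (illustrative values only):
 *
 *   "1 50/1000 100/10000 - rw discard_passdown"
 *
 * i.e. transaction id 1, 50 of 1000 metadata blocks used, 100 of 10000
 * data blocks used, no metadata snapshot held, pool writeable, discards
 * passed down to the data device.
 */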
2688 static int pool_status(struct dm_target *ti, status_type_t type,
2689 unsigned status_flags, char *result, unsigned maxlen)
2693 uint64_t transaction_id;
2694 dm_block_t nr_free_blocks_data;
2695 dm_block_t nr_free_blocks_metadata;
2696 dm_block_t nr_blocks_data;
2697 dm_block_t nr_blocks_metadata;
2698 dm_block_t held_root;
2699 char buf[BDEVNAME_SIZE];
2700 char buf2[BDEVNAME_SIZE];
2701 struct pool_c *pt = ti->private;
2702 struct pool *pool = pt->pool;
2705 case STATUSTYPE_INFO:
2706 if (get_pool_mode(pool) == PM_FAIL) {
2711 /* Commit to ensure statistics aren't out-of-date */
2712 if (!(status_flags & DM_STATUS_NOFLUSH_FLAG) && !dm_suspended(ti))
2713 (void) commit_or_fallback(pool);
2715 r = dm_pool_get_metadata_transaction_id(pool->pmd,
2720 r = dm_pool_get_free_metadata_block_count(pool->pmd,
2721 &nr_free_blocks_metadata);
2725 r = dm_pool_get_metadata_dev_size(pool->pmd, &nr_blocks_metadata);
2729 r = dm_pool_get_free_block_count(pool->pmd,
2730 &nr_free_blocks_data);
2734 r = dm_pool_get_data_dev_size(pool->pmd, &nr_blocks_data);
2738 r = dm_pool_get_metadata_snap(pool->pmd, &held_root);
2742 DMEMIT("%llu %llu/%llu %llu/%llu ",
2743 (unsigned long long)transaction_id,
2744 (unsigned long long)(nr_blocks_metadata - nr_free_blocks_metadata),
2745 (unsigned long long)nr_blocks_metadata,
2746 (unsigned long long)(nr_blocks_data - nr_free_blocks_data),
2747 (unsigned long long)nr_blocks_data);
2750 DMEMIT("%llu ", (unsigned long long)held_root);
2754 if (pool->pf.mode == PM_READ_ONLY)
2759 if (pool->pf.discard_enabled && pool->pf.discard_passdown)
2760 DMEMIT("discard_passdown");
2762 DMEMIT("no_discard_passdown");
2766 case STATUSTYPE_TABLE:
2767 DMEMIT("%s %s %lu %llu ",
2768 format_dev_t(buf, pt->metadata_dev->bdev->bd_dev),
2769 format_dev_t(buf2, pt->data_dev->bdev->bd_dev),
2770 (unsigned long)pool->sectors_per_block,
2771 (unsigned long long)pt->low_water_blocks);
2772 emit_flags(&pt->requested_pf, result, sz, maxlen);
2779 static int pool_iterate_devices(struct dm_target *ti,
2780 iterate_devices_callout_fn fn, void *data)
2782 struct pool_c *pt = ti->private;
2784 return fn(ti, pt->data_dev, 0, ti->len, data);
2787 static int pool_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
2788 struct bio_vec *biovec, int max_size)
2790 struct pool_c *pt = ti->private;
2791 struct request_queue *q = bdev_get_queue(pt->data_dev->bdev);
2793 if (!q->merge_bvec_fn)
2796 bvm->bi_bdev = pt->data_dev->bdev;
2798 return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
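/*
 * Note (added for clarity): sectors_per_block_shift is presumed to hold
 * log2 of the block size in sectors when that size is a power of two,
 * and a negative value otherwise; e.g. 128 sectors (64KiB) would give a
 * shift of 7, whereas 384 sectors (192KiB) cannot be expressed as a shift.
 */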
2801 static bool block_size_is_power_of_two(struct pool *pool)
2803 return pool->sectors_per_block_shift >= 0;
2806 static void set_discard_limits(struct pool_c *pt, struct queue_limits *limits)
2808 struct pool *pool = pt->pool;
2809 struct queue_limits *data_limits;
2811 limits->max_discard_sectors = pool->sectors_per_block;
2814 * discard_granularity is just a hint, and not enforced.
2816 if (pt->adjusted_pf.discard_passdown) {
2817 data_limits = &bdev_get_queue(pt->data_dev->bdev)->limits;
2818 limits->discard_granularity = data_limits->discard_granularity;
2819 } else if (block_size_is_power_of_two(pool))
2820 limits->discard_granularity = pool->sectors_per_block << SECTOR_SHIFT;
2823 * Use largest power of 2 that is a factor of sectors_per_block
2824 * but at least DATA_DEV_BLOCK_SIZE_MIN_SECTORS.
2826 limits->discard_granularity = max(1 << (ffs(pool->sectors_per_block) - 1),
2827 DATA_DEV_BLOCK_SIZE_MIN_SECTORS) << SECTOR_SHIFT;
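	/*
	 * Worked example (illustrative): with sectors_per_block == 384 (a
	 * 192KiB block), ffs(384) == 8, so 1 << 7 == 128 sectors;
	 * max(128, DATA_DEV_BLOCK_SIZE_MIN_SECTORS) << SECTOR_SHIFT then
	 * yields a 64KiB discard granularity.
	 */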
2830 static void pool_io_hints(struct dm_target *ti, struct queue_limits *limits)
2832 struct pool_c *pt = ti->private;
2833 struct pool *pool = pt->pool;
2835 blk_limits_io_min(limits, 0);
2836 blk_limits_io_opt(limits, pool->sectors_per_block << SECTOR_SHIFT);
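	/*
	 * e.g. (illustrative) a 128 sector block size advertises a 64KiB
	 * optimal I/O size to the block layer above.
	 */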
2839 * pt->adjusted_pf is a staging area for the actual features to use.
2840 * They get transferred to the live pool in bind_control_target()
2841 * called from pool_preresume().
2843 if (!pt->adjusted_pf.discard_enabled)
2846 disable_passdown_if_not_supported(pt);
2848 set_discard_limits(pt, limits);
2851 static struct target_type pool_target = {
2852 .name = "thin-pool",
2853 .features = DM_TARGET_SINGLETON | DM_TARGET_ALWAYS_WRITEABLE |
2854 DM_TARGET_IMMUTABLE,
2855 .version = {1, 4, 0},
2856 .module = THIS_MODULE,
2860 .postsuspend = pool_postsuspend,
2861 .preresume = pool_preresume,
2862 .resume = pool_resume,
2863 .message = pool_message,
2864 .status = pool_status,
2865 .merge = pool_merge,
2866 .iterate_devices = pool_iterate_devices,
2867 .io_hints = pool_io_hints,
2870 /*----------------------------------------------------------------
2871 * Thin target methods
2872 *--------------------------------------------------------------*/
2873 static void thin_dtr(struct dm_target *ti)
2875 struct thin_c *tc = ti->private;
2877 mutex_lock(&dm_thin_pool_table.mutex);
2879 __pool_dec(tc->pool);
2880 dm_pool_close_thin_device(tc->td);
2881 dm_put_device(ti, tc->pool_dev);
2883 dm_put_device(ti, tc->origin_dev);
2886 mutex_unlock(&dm_thin_pool_table.mutex);
2890 * Thin target parameters:
2892 * <pool_dev> <dev_id> [origin_dev]
2894 * pool_dev: the path to the pool (e.g. /dev/mapper/my_pool)
2895 * dev_id: the internal device identifier
2896 * origin_dev: a device external to the pool that should act as the origin
2898 * If the pool device has discards disabled, they get disabled for the thin
2899 * device as well.
2900 */
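/*
 * Illustrative table line (hypothetical device names): a 1GiB thin
 * volume with internal device id 0, backed by the pool at
 * /dev/mapper/pool:
 *
 *   0 2097152 thin /dev/mapper/pool 0
 */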
2901 static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv)
2905 struct dm_dev *pool_dev, *origin_dev;
2906 struct mapped_device *pool_md;
2908 mutex_lock(&dm_thin_pool_table.mutex);
2910 if (argc != 2 && argc != 3) {
2911 ti->error = "Invalid argument count";
2916 tc = ti->private = kzalloc(sizeof(*tc), GFP_KERNEL);
2918 ti->error = "Out of memory";
2924 r = dm_get_device(ti, argv[2], FMODE_READ, &origin_dev);
2926 ti->error = "Error opening origin device";
2927 goto bad_origin_dev;
2929 tc->origin_dev = origin_dev;
2932 r = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &pool_dev);
2934 ti->error = "Error opening pool device";
2937 tc->pool_dev = pool_dev;
2939 if (read_dev_id(argv[1], (unsigned long long *)&tc->dev_id, 0)) {
2940 ti->error = "Invalid device id";
2945 pool_md = dm_get_md(tc->pool_dev->bdev->bd_dev);
2947 ti->error = "Couldn't get pool mapped device";
2952 tc->pool = __pool_table_lookup(pool_md);
2954 ti->error = "Couldn't find pool object";
2956 goto bad_pool_lookup;
2958 __pool_inc(tc->pool);
2960 if (get_pool_mode(tc->pool) == PM_FAIL) {
2961 ti->error = "Couldn't open thin device: pool is in fail mode";
2965 r = dm_pool_open_thin_device(tc->pool->pmd, tc->dev_id, &tc->td);
2967 ti->error = "Couldn't open thin internal device";
2971 r = dm_set_target_max_io_len(ti, tc->pool->sectors_per_block);
2975 ti->num_flush_requests = 1;
2976 ti->flush_supported = true;
2978 /* If the pool supports discards, pass them on. */
2979 if (tc->pool->pf.discard_enabled) {
2980 ti->discards_supported = true;
2981 ti->num_discard_requests = 1;
2982 ti->discard_zeroes_data_unsupported = true;
2983 /* Discard requests must be split on a block boundary */
2984 ti->split_discard_requests = true;
2989 mutex_unlock(&dm_thin_pool_table.mutex);
2994 __pool_dec(tc->pool);
2998 dm_put_device(ti, tc->pool_dev);
3001 dm_put_device(ti, tc->origin_dev);
3005 mutex_unlock(&dm_thin_pool_table.mutex);
3010 static int thin_map(struct dm_target *ti, struct bio *bio,
3011 union map_info *map_context)
3013 bio->bi_sector = dm_target_offset(ti, bio->bi_sector);
3015 return thin_bio_map(ti, bio, map_context);
3018 static int thin_endio(struct dm_target *ti,
3019 struct bio *bio, int err,
3020 union map_info *map_context)
3022 unsigned long flags;
3023 struct dm_thin_endio_hook *h = map_context->ptr;
3024 struct list_head work;
3025 struct dm_thin_new_mapping *m, *tmp;
3026 struct pool *pool = h->tc->pool;
3028 if (h->shared_read_entry) {
3029 INIT_LIST_HEAD(&work);
3030 dm_deferred_entry_dec(h->shared_read_entry, &work);
3032 spin_lock_irqsave(&pool->lock, flags);
3033 list_for_each_entry_safe(m, tmp, &work, list) {
3036 __maybe_add_mapping(m);
3038 spin_unlock_irqrestore(&pool->lock, flags);
3041 if (h->all_io_entry) {
3042 INIT_LIST_HEAD(&work);
3043 dm_deferred_entry_dec(h->all_io_entry, &work);
3044 spin_lock_irqsave(&pool->lock, flags);
3045 list_for_each_entry_safe(m, tmp, &work, list)
3046 list_add(&m->list, &pool->prepared_discards);
3047 spin_unlock_irqrestore(&pool->lock, flags);
3050 mempool_free(h, pool->endio_hook_pool);
3055 static void thin_postsuspend(struct dm_target *ti)
3057 if (dm_noflush_suspending(ti))
3058 requeue_io((struct thin_c *)ti->private);
3062 * <nr mapped sectors> <highest mapped sector>
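 *
 * Example INFO line (illustrative): with a 64KiB (128 sector) block size,
 * 8 mapped blocks and block 15 as the highest mapped block, this reports
 * "1024 2047"; a device with nothing mapped reports "0 -".
 */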
3064 static int thin_status(struct dm_target *ti, status_type_t type,
3065 unsigned status_flags, char *result, unsigned maxlen)
3069 dm_block_t mapped, highest;
3070 char buf[BDEVNAME_SIZE];
3071 struct thin_c *tc = ti->private;
3073 if (get_pool_mode(tc->pool) == PM_FAIL) {
3082 case STATUSTYPE_INFO:
3083 r = dm_thin_get_mapped_count(tc->td, &mapped);
3087 r = dm_thin_get_highest_mapped_block(tc->td, &highest);
3091 DMEMIT("%llu ", mapped * tc->pool->sectors_per_block);
3093 DMEMIT("%llu", ((highest + 1) *
3094 tc->pool->sectors_per_block) - 1);
3099 case STATUSTYPE_TABLE:
3101 format_dev_t(buf, tc->pool_dev->bdev->bd_dev),
3102 (unsigned long) tc->dev_id);
3104 DMEMIT(" %s", format_dev_t(buf, tc->origin_dev->bdev->bd_dev));
3112 static int thin_iterate_devices(struct dm_target *ti,
3113 iterate_devices_callout_fn fn, void *data)
3116 struct thin_c *tc = ti->private;
3117 struct pool *pool = tc->pool;
3120 * We can't call dm_pool_get_data_dev_size() since that blocks. So
3121 * we follow a more convoluted path through to the pool's target.
3124 return 0; /* nothing is bound */
3126 blocks = pool->ti->len;
3127 (void) sector_div(blocks, pool->sectors_per_block);
3129 return fn(ti, tc->pool_dev, 0, pool->sectors_per_block * blocks, data);
3135 * A thin device always inherits its queue limits from its pool.
3137 static void thin_io_hints(struct dm_target *ti, struct queue_limits *limits)
3139 struct thin_c *tc = ti->private;
3141 *limits = bdev_get_queue(tc->pool_dev->bdev)->limits;
3144 static struct target_type thin_target = {
3146 .version = {1, 4, 0},
3147 .module = THIS_MODULE,
3151 .end_io = thin_endio,
3152 .postsuspend = thin_postsuspend,
3153 .status = thin_status,
3154 .iterate_devices = thin_iterate_devices,
3155 .io_hints = thin_io_hints,
3158 /*----------------------------------------------------------------*/
3160 static int __init dm_thin_init(void)
3166 r = dm_register_target(&thin_target);
3170 r = dm_register_target(&pool_target);
3172 goto bad_pool_target;
3176 dm_bio_prison_init();
3178 _new_mapping_cache = KMEM_CACHE(dm_thin_new_mapping, 0);
3179 if (!_new_mapping_cache)
3180 goto bad_new_mapping_cache;
3182 _endio_hook_cache = KMEM_CACHE(dm_thin_endio_hook, 0);
3183 if (!_endio_hook_cache)
3184 goto bad_endio_hook_cache;
3188 bad_endio_hook_cache:
3189 kmem_cache_destroy(_new_mapping_cache);
3190 bad_new_mapping_cache:
3191 dm_unregister_target(&pool_target);
3193 dm_unregister_target(&thin_target);
3198 static void dm_thin_exit(void)
3200 dm_unregister_target(&thin_target);
3201 dm_unregister_target(&pool_target);
3203 dm_bio_prison_exit();
3204 kmem_cache_destroy(_new_mapping_cache);
3205 kmem_cache_destroy(_endio_hook_cache);
3208 module_init(dm_thin_init);
3209 module_exit(dm_thin_exit);
3211 MODULE_DESCRIPTION(DM_NAME " thin provisioning target");
3212 MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
3213 MODULE_LICENSE("GPL");