dm thin: synchronize the pool mode during suspend
/*
 * Copyright (C) 2011-2012 Red Hat UK.
 *
 * This file is released under the GPL.
 */

#include "dm-thin-metadata.h"
#include "dm-bio-prison.h"
#include "dm.h"

#include <linux/device-mapper.h>
#include <linux/dm-io.h>
#include <linux/dm-kcopyd.h>
#include <linux/list.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>

#define DM_MSG_PREFIX   "thin"

/*
 * Tunable constants
 */
#define ENDIO_HOOK_POOL_SIZE 1024
#define MAPPING_POOL_SIZE 1024
#define PRISON_CELLS 1024
#define COMMIT_PERIOD HZ

DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(snapshot_copy_throttle,
                "A percentage of time allocated for copy on write");

/*
 * The block size of the device holding pool data must be
 * between 64KB and 1GB.
 */
#define DATA_DEV_BLOCK_SIZE_MIN_SECTORS (64 * 1024 >> SECTOR_SHIFT)
#define DATA_DEV_BLOCK_SIZE_MAX_SECTORS (1024 * 1024 * 1024 >> SECTOR_SHIFT)

/*
 * Device id is restricted to 24 bits.
 */
#define MAX_DEV_ID ((1 << 24) - 1)

/*
 * How do we handle breaking sharing of data blocks?
 * =================================================
 *
 * We use a standard copy-on-write btree to store the mappings for the
 * devices (note I'm talking about copy-on-write of the metadata here, not
 * the data).  When you take an internal snapshot you clone the root node
 * of the origin btree.  After this there is no concept of an origin or a
 * snapshot.  They are just two device trees that happen to point to the
 * same data blocks.
 *
 * When we get a write in we decide if it's to a shared data block using
 * some timestamp magic.  If it is, we have to break sharing.
 *
 * Let's say we write to a shared block in what was the origin.  The
 * steps are:
 *
 * i) plug io further to this physical block. (see bio_prison code).
 *
 * ii) quiesce any read io to that shared data block.  Obviously
 * including all devices that share this block.  (see dm_deferred_set code)
 *
 * iii) copy the data block to a newly allocated block.  This step can be
 * skipped if the io covers the whole block. (schedule_copy).
 *
 * iv) insert the new mapping into the origin's btree
 * (process_prepared_mapping).  This act of inserting breaks some
 * sharing of btree nodes between the two devices.  Breaking sharing only
 * affects the btree of that specific device.  Btrees for the other
 * devices that share the block never change.  The btree for the origin
 * device as it was after the last commit is untouched, i.e. we're using
 * persistent data structures in the functional programming sense.
 *
 * v) unplug io to this physical block, including the io that triggered
 * the breaking of sharing.
 *
 * Steps (ii) and (iii) occur in parallel.
 *
 * The metadata _doesn't_ need to be committed before the io continues.  We
 * get away with this because the io is always written to a _new_ block.
 * If there's a crash, then:
 *
 * - The origin mapping will point to the old origin block (the shared
 * one).  This will contain the data as it was before the io that triggered
 * the breaking of sharing came in.
 *
 * - The snap mapping still points to the old block.  As it would after
 * the commit.
 *
 * The downside of this scheme is that the timestamp magic isn't perfect:
 * it will continue to think that a data block in the snapshot device is
 * shared even after the write to the origin has broken sharing.  I suspect data
 * blocks will typically be shared by many different devices, so we're
 * breaking sharing n + 1 times, rather than n, where n is the number of
 * devices that reference this data block.  At the moment I think the
 * benefits far, far outweigh the disadvantages.
 */

/*----------------------------------------------------------------*/

/*
 * Key building.
 */
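/*
 * A virtual key identifies a block within a thin device's virtual address
 * space; a data key identifies a physical block on the pool's data device.
 * Both are used with the bio prison to detain bios that race against
 * provisioning, copying or discarding of a block.
 */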
static void build_data_key(struct dm_thin_device *td,
                           dm_block_t b, struct dm_cell_key *key)
{
        key->virtual = 0;
        key->dev = dm_thin_dev_id(td);
        key->block = b;
}

static void build_virtual_key(struct dm_thin_device *td, dm_block_t b,
                              struct dm_cell_key *key)
{
        key->virtual = 1;
        key->dev = dm_thin_dev_id(td);
        key->block = b;
}

/*----------------------------------------------------------------*/

/*
 * A pool device ties together a metadata device and a data device.  It
 * also provides the interface for creating and destroying internal
 * devices.
 */
struct dm_thin_new_mapping;

/*
 * The pool runs in 3 modes.  They are ordered from least to most
 * degraded so the modes can be compared numerically.
 */
enum pool_mode {
        PM_WRITE,               /* metadata may be changed */
        PM_READ_ONLY,           /* metadata may not be changed */
        PM_FAIL,                /* all I/O fails */
};

struct pool_features {
        enum pool_mode mode;

        bool zero_new_blocks:1;
        bool discard_enabled:1;
        bool discard_passdown:1;
        bool error_if_no_space:1;
};

struct thin_c;
typedef void (*process_bio_fn)(struct thin_c *tc, struct bio *bio);
typedef void (*process_mapping_fn)(struct dm_thin_new_mapping *m);

struct pool {
        struct list_head list;
        struct dm_target *ti;   /* Only set if a pool target is bound */

        struct mapped_device *pool_md;
        struct block_device *md_dev;
        struct dm_pool_metadata *pmd;

        dm_block_t low_water_blocks;
        uint32_t sectors_per_block;
        int sectors_per_block_shift;

        struct pool_features pf;
        bool low_water_triggered:1;     /* A dm event has been sent */

        struct dm_bio_prison *prison;
        struct dm_kcopyd_client *copier;

        struct workqueue_struct *wq;
        struct work_struct worker;
        struct delayed_work waker;

        unsigned long last_commit_jiffies;
        unsigned ref_count;

        spinlock_t lock;
        struct bio_list deferred_bios;
        struct bio_list deferred_flush_bios;
        struct list_head prepared_mappings;
        struct list_head prepared_discards;

        struct bio_list retry_on_resume_list;

        struct dm_deferred_set *shared_read_ds;
        struct dm_deferred_set *all_io_ds;

        struct dm_thin_new_mapping *next_mapping;
        mempool_t *mapping_pool;

        process_bio_fn process_bio;
        process_bio_fn process_discard;

        process_mapping_fn process_prepared_mapping;
        process_mapping_fn process_prepared_discard;
};

static enum pool_mode get_pool_mode(struct pool *pool);
static void out_of_data_space(struct pool *pool);
static void metadata_operation_failed(struct pool *pool, const char *op, int r);

/*
 * Target context for a pool.
 */
struct pool_c {
        struct dm_target *ti;
        struct pool *pool;
        struct dm_dev *data_dev;
        struct dm_dev *metadata_dev;
        struct dm_target_callbacks callbacks;

        dm_block_t low_water_blocks;
        struct pool_features requested_pf; /* Features requested during table load */
        struct pool_features adjusted_pf;  /* Features used after adjusting for constituent devices */
};

/*
 * Target context for a thin.
 */
struct thin_c {
        struct dm_dev *pool_dev;
        struct dm_dev *origin_dev;
        dm_thin_id dev_id;

        struct pool *pool;
        struct dm_thin_device *td;
};

/*----------------------------------------------------------------*/

/*
 * wake_worker() is used when new work is queued and when pool_resume is
 * ready to continue deferred IO processing.
 */
static void wake_worker(struct pool *pool)
{
        queue_work(pool->wq, &pool->worker);
}

/*----------------------------------------------------------------*/

static int bio_detain(struct pool *pool, struct dm_cell_key *key, struct bio *bio,
                      struct dm_bio_prison_cell **cell_result)
{
        int r;
        struct dm_bio_prison_cell *cell_prealloc;

        /*
         * Allocate a cell from the prison's mempool.
         * This might block but it can't fail.
         */
        cell_prealloc = dm_bio_prison_alloc_cell(pool->prison, GFP_NOIO);

        r = dm_bio_detain(pool->prison, key, bio, cell_prealloc, cell_result);
        if (r)
                /*
                 * We reused an old cell; we can get rid of
                 * the new one.
                 */
                dm_bio_prison_free_cell(pool->prison, cell_prealloc);

        return r;
}

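/*
 * Transfer all of a cell's bios, including the holder, onto the given
 * list and return the cell to the prison's mempool.
 */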
static void cell_release(struct pool *pool,
                         struct dm_bio_prison_cell *cell,
                         struct bio_list *bios)
{
        dm_cell_release(pool->prison, cell, bios);
        dm_bio_prison_free_cell(pool->prison, cell);
}

static void cell_release_no_holder(struct pool *pool,
                                   struct dm_bio_prison_cell *cell,
                                   struct bio_list *bios)
{
        dm_cell_release_no_holder(pool->prison, cell, bios);
        dm_bio_prison_free_cell(pool->prison, cell);
}

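/*
 * As cell_release_no_holder(), but the bios go onto the pool's deferred
 * list and the cell itself is not freed - used for cells allocated on
 * the stack in thin_bio_map().
 */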
static void cell_defer_no_holder_no_free(struct thin_c *tc,
                                         struct dm_bio_prison_cell *cell)
{
        struct pool *pool = tc->pool;
        unsigned long flags;

        spin_lock_irqsave(&pool->lock, flags);
        dm_cell_release_no_holder(pool->prison, cell, &pool->deferred_bios);
        spin_unlock_irqrestore(&pool->lock, flags);

        wake_worker(pool);
}

static void cell_error(struct pool *pool,
                       struct dm_bio_prison_cell *cell)
{
        dm_cell_error(pool->prison, cell);
        dm_bio_prison_free_cell(pool->prison, cell);
}

/*----------------------------------------------------------------*/

/*
 * A global list of pools that uses a struct mapped_device as a key.
 */
static struct dm_thin_pool_table {
        struct mutex mutex;
        struct list_head pools;
} dm_thin_pool_table;

static void pool_table_init(void)
{
        mutex_init(&dm_thin_pool_table.mutex);
        INIT_LIST_HEAD(&dm_thin_pool_table.pools);
}

static void __pool_table_insert(struct pool *pool)
{
        BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));
        list_add(&pool->list, &dm_thin_pool_table.pools);
}

static void __pool_table_remove(struct pool *pool)
{
        BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));
        list_del(&pool->list);
}

static struct pool *__pool_table_lookup(struct mapped_device *md)
{
        struct pool *pool = NULL, *tmp;

        BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));

        list_for_each_entry(tmp, &dm_thin_pool_table.pools, list) {
                if (tmp->pool_md == md) {
                        pool = tmp;
                        break;
                }
        }

        return pool;
}

static struct pool *__pool_table_lookup_metadata_dev(struct block_device *md_dev)
{
        struct pool *pool = NULL, *tmp;

        BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));

        list_for_each_entry(tmp, &dm_thin_pool_table.pools, list) {
                if (tmp->md_dev == md_dev) {
                        pool = tmp;
                        break;
                }
        }

        return pool;
}

/*----------------------------------------------------------------*/

struct dm_thin_endio_hook {
        struct thin_c *tc;
        struct dm_deferred_entry *shared_read_entry;
        struct dm_deferred_entry *all_io_entry;
        struct dm_thin_new_mapping *overwrite_mapping;
};

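/*
 * Requeue the bios on @master that belong to @tc, ending them with
 * DM_ENDIO_REQUEUE; bios belonging to other thin devices stay on the
 * list.  Called with pool->lock held.
 */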
static void __requeue_bio_list(struct thin_c *tc, struct bio_list *master)
{
        struct bio *bio;
        struct bio_list bios;

        bio_list_init(&bios);
        bio_list_merge(&bios, master);
        bio_list_init(master);

        while ((bio = bio_list_pop(&bios))) {
                struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));

                if (h->tc == tc)
                        bio_endio(bio, DM_ENDIO_REQUEUE);
                else
                        bio_list_add(master, bio);
        }
}

static void requeue_io(struct thin_c *tc)
{
        struct pool *pool = tc->pool;
        unsigned long flags;

        spin_lock_irqsave(&pool->lock, flags);
        __requeue_bio_list(tc, &pool->deferred_bios);
        __requeue_bio_list(tc, &pool->retry_on_resume_list);
        spin_unlock_irqrestore(&pool->lock, flags);
}

/*
 * This section of code contains the logic for processing a thin device's IO.
 * Much of the code depends on pool object resources (lists, workqueues, etc)
 * but most is exclusively called from the thin target rather than the thin-pool
 * target.
 */

static bool block_size_is_power_of_two(struct pool *pool)
{
        return pool->sectors_per_block_shift >= 0;
}

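/*
 * Map a bio's starting sector to the pool data block that backs it,
 * using a shift when the block size is a power of two and a division
 * otherwise.
 */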
static dm_block_t get_bio_block(struct thin_c *tc, struct bio *bio)
{
        struct pool *pool = tc->pool;
        sector_t block_nr = bio->bi_iter.bi_sector;

        if (block_size_is_power_of_two(pool))
                block_nr >>= pool->sectors_per_block_shift;
        else
                (void) sector_div(block_nr, pool->sectors_per_block);

        return block_nr;
}

static void remap(struct thin_c *tc, struct bio *bio, dm_block_t block)
{
        struct pool *pool = tc->pool;
        sector_t bi_sector = bio->bi_iter.bi_sector;

        bio->bi_bdev = tc->pool_dev->bdev;
        if (block_size_is_power_of_two(pool))
                bio->bi_iter.bi_sector =
                        (block << pool->sectors_per_block_shift) |
                        (bi_sector & (pool->sectors_per_block - 1));
        else
                bio->bi_iter.bi_sector = (block * pool->sectors_per_block) +
                                 sector_div(bi_sector, pool->sectors_per_block);
}

static void remap_to_origin(struct thin_c *tc, struct bio *bio)
{
        bio->bi_bdev = tc->origin_dev->bdev;
}

static int bio_triggers_commit(struct thin_c *tc, struct bio *bio)
{
        return (bio->bi_rw & (REQ_FLUSH | REQ_FUA)) &&
                dm_thin_changed_this_transaction(tc->td);
}

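/*
 * Record the bio in the pool's all_io deferred set so that later
 * discards can quiesce against in-flight io.  Discard bios are handled
 * separately (see process_discard()) and are skipped here.
 */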
static void inc_all_io_entry(struct pool *pool, struct bio *bio)
{
        struct dm_thin_endio_hook *h;

        if (bio->bi_rw & REQ_DISCARD)
                return;

        h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
        h->all_io_entry = dm_deferred_entry_inc(pool->all_io_ds);
}

static void issue(struct thin_c *tc, struct bio *bio)
{
        struct pool *pool = tc->pool;
        unsigned long flags;

        if (!bio_triggers_commit(tc, bio)) {
                generic_make_request(bio);
                return;
        }

        /*
         * Complete bio with an error if earlier I/O caused changes to
         * the metadata that can't be committed, e.g., due to I/O errors
         * on the metadata device.
         */
        if (dm_thin_aborted_changes(tc->td)) {
                bio_io_error(bio);
                return;
        }

        /*
         * Batch together any bios that trigger commits and then issue a
         * single commit for them in process_deferred_bios().
         */
        spin_lock_irqsave(&pool->lock, flags);
        bio_list_add(&pool->deferred_flush_bios, bio);
        spin_unlock_irqrestore(&pool->lock, flags);
}

static void remap_to_origin_and_issue(struct thin_c *tc, struct bio *bio)
{
        remap_to_origin(tc, bio);
        issue(tc, bio);
}

static void remap_and_issue(struct thin_c *tc, struct bio *bio,
                            dm_block_t block)
{
        remap(tc, bio, block);
        issue(tc, bio);
}

/*----------------------------------------------------------------*/

/*
 * Bio endio functions.
 */
struct dm_thin_new_mapping {
        struct list_head list;

        bool quiesced:1;
        bool prepared:1;
        bool pass_discard:1;
        bool definitely_not_shared:1;

        int err;
        struct thin_c *tc;
        dm_block_t virt_block;
        dm_block_t data_block;
        struct dm_bio_prison_cell *cell, *cell2;

        /*
         * If the bio covers the whole area of a block then we can avoid
         * zeroing or copying.  Instead this bio is hooked.  The bio will
         * still be in the cell, so care has to be taken to avoid issuing
         * the bio twice.
         */
        struct bio *bio;
        bio_end_io_t *saved_bi_end_io;
};

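/*
 * A mapping is handed to the worker only once it is both quiesced and
 * prepared.  Callers must hold pool->lock.
 */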
static void __maybe_add_mapping(struct dm_thin_new_mapping *m)
{
        struct pool *pool = m->tc->pool;

        if (m->quiesced && m->prepared) {
                list_add_tail(&m->list, &pool->prepared_mappings);
                wake_worker(pool);
        }
}

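/*
 * kcopyd callback: mark the copy as prepared and, if it has also
 * quiesced, pass it on to the worker.
 */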
static void copy_complete(int read_err, unsigned long write_err, void *context)
{
        unsigned long flags;
        struct dm_thin_new_mapping *m = context;
        struct pool *pool = m->tc->pool;

        m->err = read_err || write_err ? -EIO : 0;

        spin_lock_irqsave(&pool->lock, flags);
        m->prepared = true;
        __maybe_add_mapping(m);
        spin_unlock_irqrestore(&pool->lock, flags);
}

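/*
 * Endio for a bio that overwrote a whole block, hooked by
 * save_and_set_endio(); like copy_complete() it marks the mapping
 * prepared.
 */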
static void overwrite_endio(struct bio *bio, int err)
{
        unsigned long flags;
        struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
        struct dm_thin_new_mapping *m = h->overwrite_mapping;
        struct pool *pool = m->tc->pool;

        m->err = err;

        spin_lock_irqsave(&pool->lock, flags);
        m->prepared = true;
        __maybe_add_mapping(m);
        spin_unlock_irqrestore(&pool->lock, flags);
}

/*----------------------------------------------------------------*/

/*
 * Workqueue.
 */

/*
 * Prepared mapping jobs.
 */

/*
 * This sends the bios in the cell back to the deferred_bios list.
 */
static void cell_defer(struct thin_c *tc, struct dm_bio_prison_cell *cell)
{
        struct pool *pool = tc->pool;
        unsigned long flags;

        spin_lock_irqsave(&pool->lock, flags);
        cell_release(pool, cell, &pool->deferred_bios);
        spin_unlock_irqrestore(&pool->lock, flags);

        wake_worker(pool);
}

/*
 * Same as cell_defer above, except it omits the original holder of the cell.
 */
static void cell_defer_no_holder(struct thin_c *tc, struct dm_bio_prison_cell *cell)
{
        struct pool *pool = tc->pool;
        unsigned long flags;

        spin_lock_irqsave(&pool->lock, flags);
        cell_release_no_holder(pool, cell, &pool->deferred_bios);
        spin_unlock_irqrestore(&pool->lock, flags);

        wake_worker(pool);
}

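/*
 * The mapping could not be inserted: restore the bio's endio, error all
 * bios in the cell and free the mapping.
 */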
static void process_prepared_mapping_fail(struct dm_thin_new_mapping *m)
{
        if (m->bio) {
                m->bio->bi_end_io = m->saved_bi_end_io;
                atomic_inc(&m->bio->bi_remaining);
        }
        cell_error(m->tc->pool, m->cell);
        list_del(&m->list);
        mempool_free(m, m->tc->pool->mapping_pool);
}

static void process_prepared_mapping(struct dm_thin_new_mapping *m)
{
        struct thin_c *tc = m->tc;
        struct pool *pool = tc->pool;
        struct bio *bio;
        int r;

        bio = m->bio;
        if (bio) {
                bio->bi_end_io = m->saved_bi_end_io;
                atomic_inc(&bio->bi_remaining);
        }

        if (m->err) {
                cell_error(pool, m->cell);
                goto out;
        }

        /*
         * Commit the prepared block into the mapping btree.
         * Any I/O for this block arriving after this point will get
         * remapped to it directly.
         */
        r = dm_thin_insert_block(tc->td, m->virt_block, m->data_block);
        if (r) {
                metadata_operation_failed(pool, "dm_thin_insert_block", r);
                cell_error(pool, m->cell);
                goto out;
        }

        /*
         * Release any bios held while the block was being provisioned.
         * If we are processing a write bio that completely covers the block,
         * we already processed it so can ignore it now when processing
         * the bios in the cell.
         */
        if (bio) {
                cell_defer_no_holder(tc, m->cell);
                bio_endio(bio, 0);
        } else
                cell_defer(tc, m->cell);

out:
        list_del(&m->list);
        mempool_free(m, pool->mapping_pool);
}

static void process_prepared_discard_fail(struct dm_thin_new_mapping *m)
{
        struct thin_c *tc = m->tc;

        bio_io_error(m->bio);
        cell_defer_no_holder(tc, m->cell);
        cell_defer_no_holder(tc, m->cell2);
        mempool_free(m, tc->pool->mapping_pool);
}

static void process_prepared_discard_passdown(struct dm_thin_new_mapping *m)
{
        struct thin_c *tc = m->tc;

        inc_all_io_entry(tc->pool, m->bio);
        cell_defer_no_holder(tc, m->cell);
        cell_defer_no_holder(tc, m->cell2);

        if (m->pass_discard) {
                if (m->definitely_not_shared)
                        remap_and_issue(tc, m->bio, m->data_block);
                else {
                        bool used = false;
                        if (dm_pool_block_is_used(tc->pool->pmd, m->data_block, &used) || used)
                                bio_endio(m->bio, 0);
                        else
                                remap_and_issue(tc, m->bio, m->data_block);
                }
        } else
                bio_endio(m->bio, 0);

        mempool_free(m, tc->pool->mapping_pool);
}

static void process_prepared_discard(struct dm_thin_new_mapping *m)
{
        int r;
        struct thin_c *tc = m->tc;

        r = dm_thin_remove_block(tc->td, m->virt_block);
        if (r)
                DMERR_LIMIT("dm_thin_remove_block() failed");

        process_prepared_discard_passdown(m);
}

static void process_prepared(struct pool *pool, struct list_head *head,
                             process_mapping_fn *fn)
{
        unsigned long flags;
        struct list_head maps;
        struct dm_thin_new_mapping *m, *tmp;

        INIT_LIST_HEAD(&maps);
        spin_lock_irqsave(&pool->lock, flags);
        list_splice_init(head, &maps);
        spin_unlock_irqrestore(&pool->lock, flags);

        list_for_each_entry_safe(m, tmp, &maps, list)
                (*fn)(m);
}

/*
 * Deferred bio jobs.
 */
static int io_overlaps_block(struct pool *pool, struct bio *bio)
{
        return bio->bi_iter.bi_size ==
                (pool->sectors_per_block << SECTOR_SHIFT);
}

static int io_overwrites_block(struct pool *pool, struct bio *bio)
{
        return (bio_data_dir(bio) == WRITE) &&
                io_overlaps_block(pool, bio);
}

static void save_and_set_endio(struct bio *bio, bio_end_io_t **save,
                               bio_end_io_t *fn)
{
        *save = bio->bi_end_io;
        bio->bi_end_io = fn;
}

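/*
 * Keep a mapping struct preallocated in pool->next_mapping so the worker
 * can find out, before committing to a bio, whether a mapping will be
 * available.  The GFP_ATOMIC allocation may fail; the caller then defers
 * the bio until prepared mappings have been consumed.
 */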
static int ensure_next_mapping(struct pool *pool)
{
        if (pool->next_mapping)
                return 0;

        pool->next_mapping = mempool_alloc(pool->mapping_pool, GFP_ATOMIC);

        return pool->next_mapping ? 0 : -ENOMEM;
}

static struct dm_thin_new_mapping *get_next_mapping(struct pool *pool)
{
        struct dm_thin_new_mapping *m = pool->next_mapping;

        BUG_ON(!pool->next_mapping);

        memset(m, 0, sizeof(struct dm_thin_new_mapping));
        INIT_LIST_HEAD(&m->list);
        m->bio = NULL;

        pool->next_mapping = NULL;

        return m;
}

static void schedule_copy(struct thin_c *tc, dm_block_t virt_block,
                          struct dm_dev *origin, dm_block_t data_origin,
                          dm_block_t data_dest,
                          struct dm_bio_prison_cell *cell, struct bio *bio)
{
        int r;
        struct pool *pool = tc->pool;
        struct dm_thin_new_mapping *m = get_next_mapping(pool);

        m->tc = tc;
        m->virt_block = virt_block;
        m->data_block = data_dest;
        m->cell = cell;

        if (!dm_deferred_set_add_work(pool->shared_read_ds, &m->list))
                m->quiesced = true;

        /*
         * IO to pool_dev remaps to the pool target's data_dev.
         *
         * If the whole block of data is being overwritten, we can issue the
         * bio immediately. Otherwise we use kcopyd to clone the data first.
         */
        if (io_overwrites_block(pool, bio)) {
                struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));

                h->overwrite_mapping = m;
                m->bio = bio;
                save_and_set_endio(bio, &m->saved_bi_end_io, overwrite_endio);
                inc_all_io_entry(pool, bio);
                remap_and_issue(tc, bio, data_dest);
        } else {
                struct dm_io_region from, to;

                from.bdev = origin->bdev;
                from.sector = data_origin * pool->sectors_per_block;
                from.count = pool->sectors_per_block;

                to.bdev = tc->pool_dev->bdev;
                to.sector = data_dest * pool->sectors_per_block;
                to.count = pool->sectors_per_block;

                r = dm_kcopyd_copy(pool->copier, &from, 1, &to,
                                   0, copy_complete, m);
                if (r < 0) {
                        mempool_free(m, pool->mapping_pool);
                        DMERR_LIMIT("dm_kcopyd_copy() failed");
                        cell_error(pool, cell);
                }
        }
}

static void schedule_internal_copy(struct thin_c *tc, dm_block_t virt_block,
                                   dm_block_t data_origin, dm_block_t data_dest,
                                   struct dm_bio_prison_cell *cell, struct bio *bio)
{
        schedule_copy(tc, virt_block, tc->pool_dev,
                      data_origin, data_dest, cell, bio);
}

static void schedule_external_copy(struct thin_c *tc, dm_block_t virt_block,
                                   dm_block_t data_dest,
                                   struct dm_bio_prison_cell *cell, struct bio *bio)
{
        schedule_copy(tc, virt_block, tc->origin_dev,
                      virt_block, data_dest, cell, bio);
}

static void schedule_zero(struct thin_c *tc, dm_block_t virt_block,
                          dm_block_t data_block, struct dm_bio_prison_cell *cell,
                          struct bio *bio)
{
        struct pool *pool = tc->pool;
        struct dm_thin_new_mapping *m = get_next_mapping(pool);

        m->quiesced = true;
        m->prepared = false;
        m->tc = tc;
        m->virt_block = virt_block;
        m->data_block = data_block;
        m->cell = cell;

        /*
         * If the whole block of data is being overwritten or we are not
         * zeroing pre-existing data, we can issue the bio immediately.
         * Otherwise we use kcopyd to zero the data first.
         */
        if (!pool->pf.zero_new_blocks)
                process_prepared_mapping(m);

        else if (io_overwrites_block(pool, bio)) {
                struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));

                h->overwrite_mapping = m;
                m->bio = bio;
                save_and_set_endio(bio, &m->saved_bi_end_io, overwrite_endio);
                inc_all_io_entry(pool, bio);
                remap_and_issue(tc, bio, data_block);
        } else {
                int r;
                struct dm_io_region to;

                to.bdev = tc->pool_dev->bdev;
                to.sector = data_block * pool->sectors_per_block;
                to.count = pool->sectors_per_block;

                r = dm_kcopyd_zero(pool->copier, 1, &to, 0, copy_complete, m);
                if (r < 0) {
                        mempool_free(m, pool->mapping_pool);
                        DMERR_LIMIT("dm_kcopyd_zero() failed");
                        cell_error(pool, cell);
                }
        }
}

/*
 * A non-zero return indicates read_only or fail_io mode.
 * Many callers don't care about the return value.
 */
static int commit(struct pool *pool)
{
        int r;

        if (get_pool_mode(pool) != PM_WRITE)
                return -EINVAL;

        r = dm_pool_commit_metadata(pool->pmd);
        if (r)
                metadata_operation_failed(pool, "dm_pool_commit_metadata", r);

        return r;
}

static void check_low_water_mark(struct pool *pool, dm_block_t free_blocks)
{
        unsigned long flags;

        if (free_blocks <= pool->low_water_blocks && !pool->low_water_triggered) {
                DMWARN("%s: reached low water mark for data device: sending event.",
                       dm_device_name(pool->pool_md));
                spin_lock_irqsave(&pool->lock, flags);
                pool->low_water_triggered = true;
                spin_unlock_irqrestore(&pool->lock, flags);
                dm_table_event(pool->ti->table);
        }
}

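/*
 * Allocate a new data block for @tc.  If the pool looks full, commit
 * outstanding metadata first in case that releases blocks; if it is
 * still full, degrade the pool via out_of_data_space().
 */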
static int alloc_data_block(struct thin_c *tc, dm_block_t *result)
{
        int r;
        dm_block_t free_blocks;
        struct pool *pool = tc->pool;

        if (get_pool_mode(pool) != PM_WRITE)
                return -EINVAL;

        r = dm_pool_get_free_block_count(pool->pmd, &free_blocks);
        if (r) {
                metadata_operation_failed(pool, "dm_pool_get_free_block_count", r);
                return r;
        }

        check_low_water_mark(pool, free_blocks);

        if (!free_blocks) {
                /*
                 * Try to commit to see if that will free up some
                 * more space.
                 */
                r = commit(pool);
                if (r)
                        return r;

                r = dm_pool_get_free_block_count(pool->pmd, &free_blocks);
                if (r) {
                        metadata_operation_failed(pool, "dm_pool_get_free_block_count", r);
                        return r;
                }

                if (!free_blocks) {
                        out_of_data_space(pool);
                        return -ENOSPC;
                }
        }

        r = dm_pool_alloc_data_block(pool->pmd, result);
        if (r) {
                metadata_operation_failed(pool, "dm_pool_alloc_data_block", r);
                return r;
        }

        return 0;
}

/*
 * If we have run out of space, queue bios until the device is
 * resumed, presumably after having been reloaded with more space.
 */
static void retry_on_resume(struct bio *bio)
{
        struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
        struct thin_c *tc = h->tc;
        struct pool *pool = tc->pool;
        unsigned long flags;

        spin_lock_irqsave(&pool->lock, flags);
        bio_list_add(&pool->retry_on_resume_list, bio);
        spin_unlock_irqrestore(&pool->lock, flags);
}

static void handle_unserviceable_bio(struct pool *pool, struct bio *bio)
{
        /*
         * When pool is read-only, no cell locking is needed because
         * nothing is changing.
         */
        WARN_ON_ONCE(get_pool_mode(pool) != PM_READ_ONLY);

        if (pool->pf.error_if_no_space)
                bio_io_error(bio);
        else
                retry_on_resume(bio);
}

static void retry_bios_on_resume(struct pool *pool, struct dm_bio_prison_cell *cell)
{
        struct bio *bio;
        struct bio_list bios;

        bio_list_init(&bios);
        cell_release(pool, cell, &bios);

        while ((bio = bio_list_pop(&bios)))
                handle_unserviceable_bio(pool, bio);
}

static void process_discard(struct thin_c *tc, struct bio *bio)
{
        int r;
        unsigned long flags;
        struct pool *pool = tc->pool;
        struct dm_bio_prison_cell *cell, *cell2;
        struct dm_cell_key key, key2;
        dm_block_t block = get_bio_block(tc, bio);
        struct dm_thin_lookup_result lookup_result;
        struct dm_thin_new_mapping *m;

        build_virtual_key(tc->td, block, &key);
        if (bio_detain(tc->pool, &key, bio, &cell))
                return;

        r = dm_thin_find_block(tc->td, block, 1, &lookup_result);
        switch (r) {
        case 0:
                /*
                 * Check nobody is fiddling with this pool block.  This can
                 * happen if someone's in the process of breaking sharing
                 * on this block.
                 */
                build_data_key(tc->td, lookup_result.block, &key2);
                if (bio_detain(tc->pool, &key2, bio, &cell2)) {
                        cell_defer_no_holder(tc, cell);
                        break;
                }

                if (io_overlaps_block(pool, bio)) {
                        /*
                         * IO may still be going to the destination block.  We must
                         * quiesce before we can do the removal.
                         */
                        m = get_next_mapping(pool);
                        m->tc = tc;
                        m->pass_discard = pool->pf.discard_passdown;
                        m->definitely_not_shared = !lookup_result.shared;
                        m->virt_block = block;
                        m->data_block = lookup_result.block;
                        m->cell = cell;
                        m->cell2 = cell2;
                        m->bio = bio;

                        if (!dm_deferred_set_add_work(pool->all_io_ds, &m->list)) {
                                spin_lock_irqsave(&pool->lock, flags);
                                list_add_tail(&m->list, &pool->prepared_discards);
                                spin_unlock_irqrestore(&pool->lock, flags);
                                wake_worker(pool);
                        }
                } else {
                        inc_all_io_entry(pool, bio);
                        cell_defer_no_holder(tc, cell);
                        cell_defer_no_holder(tc, cell2);

                        /*
                         * The DM core makes sure that the discard doesn't span
                         * a block boundary.  So we submit the discard of a
                         * partial block appropriately.
                         */
                        if ((!lookup_result.shared) && pool->pf.discard_passdown)
                                remap_and_issue(tc, bio, lookup_result.block);
                        else
                                bio_endio(bio, 0);
                }
                break;

        case -ENODATA:
                /*
                 * It isn't provisioned, just forget it.
                 */
                cell_defer_no_holder(tc, cell);
                bio_endio(bio, 0);
                break;

        default:
                DMERR_LIMIT("%s: dm_thin_find_block() failed: error = %d",
                            __func__, r);
                cell_defer_no_holder(tc, cell);
                bio_io_error(bio);
                break;
        }
}

static void break_sharing(struct thin_c *tc, struct bio *bio, dm_block_t block,
                          struct dm_cell_key *key,
                          struct dm_thin_lookup_result *lookup_result,
                          struct dm_bio_prison_cell *cell)
{
        int r;
        dm_block_t data_block;
        struct pool *pool = tc->pool;

        r = alloc_data_block(tc, &data_block);
        switch (r) {
        case 0:
                schedule_internal_copy(tc, block, lookup_result->block,
                                       data_block, cell, bio);
                break;

        case -ENOSPC:
                retry_bios_on_resume(pool, cell);
                break;

        default:
                DMERR_LIMIT("%s: alloc_data_block() failed: error = %d",
                            __func__, r);
                cell_error(pool, cell);
                break;
        }
}

static void process_shared_bio(struct thin_c *tc, struct bio *bio,
                               dm_block_t block,
                               struct dm_thin_lookup_result *lookup_result)
{
        struct dm_bio_prison_cell *cell;
        struct pool *pool = tc->pool;
        struct dm_cell_key key;

        /*
         * If cell is already occupied, then sharing is already in the process
         * of being broken so we have nothing further to do here.
         */
        build_data_key(tc->td, lookup_result->block, &key);
        if (bio_detain(pool, &key, bio, &cell))
                return;

        if (bio_data_dir(bio) == WRITE && bio->bi_iter.bi_size)
                break_sharing(tc, bio, block, &key, lookup_result, cell);
        else {
                struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));

                h->shared_read_entry = dm_deferred_entry_inc(pool->shared_read_ds);
                inc_all_io_entry(pool, bio);
                cell_defer_no_holder(tc, cell);

                remap_and_issue(tc, bio, lookup_result->block);
        }
}

static void provision_block(struct thin_c *tc, struct bio *bio, dm_block_t block,
                            struct dm_bio_prison_cell *cell)
{
        int r;
        dm_block_t data_block;
        struct pool *pool = tc->pool;

        /*
         * Remap empty bios (flushes) immediately, without provisioning.
         */
        if (!bio->bi_iter.bi_size) {
                inc_all_io_entry(pool, bio);
                cell_defer_no_holder(tc, cell);

                remap_and_issue(tc, bio, 0);
                return;
        }

        /*
         * Fill read bios with zeroes and complete them immediately.
         */
        if (bio_data_dir(bio) == READ) {
                zero_fill_bio(bio);
                cell_defer_no_holder(tc, cell);
                bio_endio(bio, 0);
                return;
        }

        r = alloc_data_block(tc, &data_block);
        switch (r) {
        case 0:
                if (tc->origin_dev)
                        schedule_external_copy(tc, block, data_block, cell, bio);
                else
                        schedule_zero(tc, block, data_block, cell, bio);
                break;

        case -ENOSPC:
                retry_bios_on_resume(pool, cell);
                break;

        default:
                DMERR_LIMIT("%s: alloc_data_block() failed: error = %d",
                            __func__, r);
                cell_error(pool, cell);
                break;
        }
}

static void process_bio(struct thin_c *tc, struct bio *bio)
{
        int r;
        struct pool *pool = tc->pool;
        dm_block_t block = get_bio_block(tc, bio);
        struct dm_bio_prison_cell *cell;
        struct dm_cell_key key;
        struct dm_thin_lookup_result lookup_result;

        /*
         * If cell is already occupied, then the block is already
         * being provisioned so we have nothing further to do here.
         */
        build_virtual_key(tc->td, block, &key);
        if (bio_detain(pool, &key, bio, &cell))
                return;

        r = dm_thin_find_block(tc->td, block, 1, &lookup_result);
        switch (r) {
        case 0:
                if (lookup_result.shared) {
                        process_shared_bio(tc, bio, block, &lookup_result);
                        cell_defer_no_holder(tc, cell); /* FIXME: pass this cell into process_shared? */
                } else {
                        inc_all_io_entry(pool, bio);
                        cell_defer_no_holder(tc, cell);

                        remap_and_issue(tc, bio, lookup_result.block);
                }
                break;

        case -ENODATA:
                if (bio_data_dir(bio) == READ && tc->origin_dev) {
                        inc_all_io_entry(pool, bio);
                        cell_defer_no_holder(tc, cell);

                        remap_to_origin_and_issue(tc, bio);
                } else
                        provision_block(tc, bio, block, cell);
                break;

        default:
                DMERR_LIMIT("%s: dm_thin_find_block() failed: error = %d",
                            __func__, r);
                cell_defer_no_holder(tc, cell);
                bio_io_error(bio);
                break;
        }
}

static void process_bio_read_only(struct thin_c *tc, struct bio *bio)
{
        int r;
        int rw = bio_data_dir(bio);
        dm_block_t block = get_bio_block(tc, bio);
        struct dm_thin_lookup_result lookup_result;

        r = dm_thin_find_block(tc->td, block, 1, &lookup_result);
        switch (r) {
        case 0:
                if (lookup_result.shared && (rw == WRITE) && bio->bi_iter.bi_size)
                        handle_unserviceable_bio(tc->pool, bio);
                else {
                        inc_all_io_entry(tc->pool, bio);
                        remap_and_issue(tc, bio, lookup_result.block);
                }
                break;

        case -ENODATA:
                if (rw != READ) {
                        handle_unserviceable_bio(tc->pool, bio);
                        break;
                }

                if (tc->origin_dev) {
                        inc_all_io_entry(tc->pool, bio);
                        remap_to_origin_and_issue(tc, bio);
                        break;
                }

                zero_fill_bio(bio);
                bio_endio(bio, 0);
                break;

        default:
                DMERR_LIMIT("%s: dm_thin_find_block() failed: error = %d",
                            __func__, r);
                bio_io_error(bio);
                break;
        }
}

static void process_bio_fail(struct thin_c *tc, struct bio *bio)
{
        bio_io_error(bio);
}

/*
 * FIXME: should we also commit due to size of transaction, measured in
 * metadata blocks?
 */
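/*
 * The first test also returns true when jiffies has wrapped past
 * last_commit_jiffies, which forces a commit rather than deferring it
 * indefinitely.
 */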
static int need_commit_due_to_time(struct pool *pool)
{
        return jiffies < pool->last_commit_jiffies ||
               jiffies > pool->last_commit_jiffies + COMMIT_PERIOD;
}

static void process_deferred_bios(struct pool *pool)
{
        unsigned long flags;
        struct bio *bio;
        struct bio_list bios;

        bio_list_init(&bios);

        spin_lock_irqsave(&pool->lock, flags);
        bio_list_merge(&bios, &pool->deferred_bios);
        bio_list_init(&pool->deferred_bios);
        spin_unlock_irqrestore(&pool->lock, flags);

        while ((bio = bio_list_pop(&bios))) {
                struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
                struct thin_c *tc = h->tc;

                /*
                 * If we've got no free new_mapping structs, and processing
                 * this bio might require one, we pause until there are some
                 * prepared mappings to process.
                 */
                if (ensure_next_mapping(pool)) {
                        spin_lock_irqsave(&pool->lock, flags);
                        bio_list_merge(&pool->deferred_bios, &bios);
                        spin_unlock_irqrestore(&pool->lock, flags);

                        break;
                }

                if (bio->bi_rw & REQ_DISCARD)
                        pool->process_discard(tc, bio);
                else
                        pool->process_bio(tc, bio);
        }

        /*
         * If there are any deferred flush bios, we must commit
         * the metadata before issuing them.
         */
        bio_list_init(&bios);
        spin_lock_irqsave(&pool->lock, flags);
        bio_list_merge(&bios, &pool->deferred_flush_bios);
        bio_list_init(&pool->deferred_flush_bios);
        spin_unlock_irqrestore(&pool->lock, flags);

        if (bio_list_empty(&bios) &&
            !(dm_pool_changed_this_transaction(pool->pmd) && need_commit_due_to_time(pool)))
                return;

        if (commit(pool)) {
                while ((bio = bio_list_pop(&bios)))
                        bio_io_error(bio);
                return;
        }
        pool->last_commit_jiffies = jiffies;

        while ((bio = bio_list_pop(&bios)))
                generic_make_request(bio);
}

static void do_worker(struct work_struct *ws)
{
        struct pool *pool = container_of(ws, struct pool, worker);

        process_prepared(pool, &pool->prepared_mappings, &pool->process_prepared_mapping);
        process_prepared(pool, &pool->prepared_discards, &pool->process_prepared_discard);
        process_deferred_bios(pool);
}

/*
 * We want to commit periodically so that not too much
 * unwritten data builds up.
 */
static void do_waker(struct work_struct *ws)
{
        struct pool *pool = container_of(to_delayed_work(ws), struct pool, waker);
        wake_worker(pool);
        queue_delayed_work(pool->wq, &pool->waker, COMMIT_PERIOD);
}

/*----------------------------------------------------------------*/

static enum pool_mode get_pool_mode(struct pool *pool)
{
        return pool->pf.mode;
}

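/*
 * Switch the pool between write, read-only and failure modes, wiring up
 * the appropriate processing functions for each.  The new mode is also
 * copied into the pool_c's adjusted feature set so a subsequent table
 * reload sees the current mode.
 */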
1402 static void set_pool_mode(struct pool *pool, enum pool_mode new_mode)
1403 {
1404         int r;
1405         struct pool_c *pt = pool->ti->private;
1406         enum pool_mode old_mode = pool->pf.mode;
1407
1408         switch (new_mode) {
1409         case PM_FAIL:
1410                 if (old_mode != new_mode)
1411                         DMERR("%s: switching pool to failure mode",
1412                               dm_device_name(pool->pool_md));
1413                 dm_pool_metadata_read_only(pool->pmd);
1414                 pool->process_bio = process_bio_fail;
1415                 pool->process_discard = process_bio_fail;
1416                 pool->process_prepared_mapping = process_prepared_mapping_fail;
1417                 pool->process_prepared_discard = process_prepared_discard_fail;
1418                 break;
1419
1420         case PM_READ_ONLY:
1421                 if (old_mode != new_mode)
1422                         DMERR("%s: switching pool to read-only mode",
1423                               dm_device_name(pool->pool_md));
1424                 r = dm_pool_abort_metadata(pool->pmd);
1425                 if (r) {
1426                         DMERR("%s: aborting transaction failed",
1427                               dm_device_name(pool->pool_md));
1428                         new_mode = PM_FAIL;
1429                         set_pool_mode(pool, new_mode);
1430                 } else {
1431                         dm_pool_metadata_read_only(pool->pmd);
1432                         pool->process_bio = process_bio_read_only;
1433                         pool->process_discard = process_discard;
1434                         pool->process_prepared_mapping = process_prepared_mapping_fail;
1435                         pool->process_prepared_discard = process_prepared_discard_passdown;
1436                 }
1437                 break;
1438
1439         case PM_WRITE:
1440                 if (old_mode != new_mode)
1441                         DMINFO("%s: switching pool to write mode",
1442                                dm_device_name(pool->pool_md));
1443                 dm_pool_metadata_read_write(pool->pmd);
1444                 pool->process_bio = process_bio;
1445                 pool->process_discard = process_discard;
1446                 pool->process_prepared_mapping = process_prepared_mapping;
1447                 pool->process_prepared_discard = process_prepared_discard;
1448                 break;
1449         }
1450
1451         pool->pf.mode = new_mode;
1452         /*
1453          * The pool mode may have changed, sync it so bind_control_target()
1454          * doesn't cause an unexpected mode transition on resume.
1455          */
1456         pt->adjusted_pf.mode = new_mode;
1457 }
1458
1459 /*
1460  * Rather than calling set_pool_mode directly, use these which describe the
1461  * reason for mode degradation.
1462  */
1463 static void out_of_data_space(struct pool *pool)
1464 {
1465         DMERR_LIMIT("%s: no free data space available.",
1466                     dm_device_name(pool->pool_md));
1467         set_pool_mode(pool, PM_READ_ONLY);
1468 }
1469
1470 static void metadata_operation_failed(struct pool *pool, const char *op, int r)
1471 {
1472         dm_block_t free_blocks;
1473
1474         DMERR_LIMIT("%s: metadata operation '%s' failed: error = %d",
1475                     dm_device_name(pool->pool_md), op, r);
1476
1477         if (r == -ENOSPC &&
1478             !dm_pool_get_free_metadata_block_count(pool->pmd, &free_blocks) &&
1479             !free_blocks)
1480                 DMERR_LIMIT("%s: no free metadata space available.",
1481                             dm_device_name(pool->pool_md));
1482
1483         set_pool_mode(pool, PM_READ_ONLY);
1484 }
1485
1486 /*----------------------------------------------------------------*/
1487
1488 /*
1489  * Mapping functions.
1490  */
1491
1492 /*
1493  * Called only while mapping a thin bio to hand it over to the workqueue.
1494  */
1495 static void thin_defer_bio(struct thin_c *tc, struct bio *bio)
1496 {
1497         unsigned long flags;
1498         struct pool *pool = tc->pool;
1499
1500         spin_lock_irqsave(&pool->lock, flags);
1501         bio_list_add(&pool->deferred_bios, bio);
1502         spin_unlock_irqrestore(&pool->lock, flags);
1503
1504         wake_worker(pool);
1505 }
1506
1507 static void thin_hook_bio(struct thin_c *tc, struct bio *bio)
1508 {
1509         struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
1510
1511         h->tc = tc;
1512         h->shared_read_entry = NULL;
1513         h->all_io_entry = NULL;
1514         h->overwrite_mapping = NULL;
1515 }
1516
1517 /*
1518  * Non-blocking function called from the thin target's map function.
1519  */
1520 static int thin_bio_map(struct dm_target *ti, struct bio *bio)
1521 {
1522         int r;
1523         struct thin_c *tc = ti->private;
1524         dm_block_t block = get_bio_block(tc, bio);
1525         struct dm_thin_device *td = tc->td;
1526         struct dm_thin_lookup_result result;
1527         struct dm_bio_prison_cell cell1, cell2;
1528         struct dm_bio_prison_cell *cell_result;
1529         struct dm_cell_key key;
1530
1531         thin_hook_bio(tc, bio);
1532
1533         if (get_pool_mode(tc->pool) == PM_FAIL) {
1534                 bio_io_error(bio);
1535                 return DM_MAPIO_SUBMITTED;
1536         }
1537
1538         if (bio->bi_rw & (REQ_DISCARD | REQ_FLUSH | REQ_FUA)) {
1539                 thin_defer_bio(tc, bio);
1540                 return DM_MAPIO_SUBMITTED;
1541         }
1542
1543         r = dm_thin_find_block(td, block, 0, &result);
1544
1545         /*
1546          * Note that we defer readahead too.
1547          */
1548         switch (r) {
1549         case 0:
1550                 if (unlikely(result.shared)) {
1551                         /*
1552                          * We have a race condition here between the
1553                          * result.shared value returned by the lookup and
1554                          * snapshot creation, which may cause new
1555                          * sharing.
1556                          *
1557                          * To avoid this always quiesce the origin before
1558                          * taking the snap.  You want to do this anyway to
1559                          * ensure a consistent application view
1560                          * (i.e. lockfs).
1561                          *
1562                          * More distant ancestors are irrelevant. The
1563                          * shared flag will be set in their case.
1564                          */
1565                         thin_defer_bio(tc, bio);
1566                         return DM_MAPIO_SUBMITTED;
1567                 }
1568
1569                 build_virtual_key(tc->td, block, &key);
1570                 if (dm_bio_detain(tc->pool->prison, &key, bio, &cell1, &cell_result))
1571                         return DM_MAPIO_SUBMITTED;
1572
1573                 build_data_key(tc->td, result.block, &key);
1574                 if (dm_bio_detain(tc->pool->prison, &key, bio, &cell2, &cell_result)) {
1575                         cell_defer_no_holder_no_free(tc, &cell1);
1576                         return DM_MAPIO_SUBMITTED;
1577                 }
1578
1579                 inc_all_io_entry(tc->pool, bio);
1580                 cell_defer_no_holder_no_free(tc, &cell2);
1581                 cell_defer_no_holder_no_free(tc, &cell1);
1582
1583                 remap(tc, bio, result.block);
1584                 return DM_MAPIO_REMAPPED;
1585
1586         case -ENODATA:
1587                 if (get_pool_mode(tc->pool) == PM_READ_ONLY) {
1588                         /*
1589                          * This block isn't provisioned, and we have no way
1590                          * of doing so.
1591                          */
1592                         handle_unserviceable_bio(tc->pool, bio);
1593                         return DM_MAPIO_SUBMITTED;
1594                 }
1595                 /* fall through */
1596
1597         case -EWOULDBLOCK:
1598                 /*
1599                  * In the future, the failed dm_thin_find_block above could
1600                  * provide a hint to preload the metadata into the cache.
1601                  */
1602                 thin_defer_bio(tc, bio);
1603                 return DM_MAPIO_SUBMITTED;
1604
1605         default:
1606                 /*
1607                  * Must always call bio_io_error on failure.
1608                  * dm_thin_find_block can fail with -EINVAL if the
1609                  * pool is switched to fail-io mode.
1610                  */
1611                 bio_io_error(bio);
1612                 return DM_MAPIO_SUBMITTED;
1613         }
1614 }
1615
1616 static int pool_is_congested(struct dm_target_callbacks *cb, int bdi_bits)
1617 {
1618         int r;
1619         unsigned long flags;
1620         struct pool_c *pt = container_of(cb, struct pool_c, callbacks);
1621
1622         spin_lock_irqsave(&pt->pool->lock, flags);
1623         r = !bio_list_empty(&pt->pool->retry_on_resume_list);
1624         spin_unlock_irqrestore(&pt->pool->lock, flags);
1625
1626         if (!r) {
1627                 struct request_queue *q = bdev_get_queue(pt->data_dev->bdev);
1628                 r = bdi_congested(&q->backing_dev_info, bdi_bits);
1629         }
1630
1631         return r;
1632 }
1633
1634 static void __requeue_bios(struct pool *pool)
1635 {
1636         bio_list_merge(&pool->deferred_bios, &pool->retry_on_resume_list);
1637         bio_list_init(&pool->retry_on_resume_list);
1638 }
1639
1640 /*----------------------------------------------------------------
1641  * Binding of control targets to a pool object
1642  *--------------------------------------------------------------*/
1643 static bool data_dev_supports_discard(struct pool_c *pt)
1644 {
1645         struct request_queue *q = bdev_get_queue(pt->data_dev->bdev);
1646
1647         return q && blk_queue_discard(q);
1648 }
1649
1650 static bool is_factor(sector_t block_size, uint32_t n)
1651 {
1652         return !sector_div(block_size, n);
1653 }
1654
1655 /*
1656  * If discard_passdown was enabled, verify that the data device
1657  * supports discards.  Disable discard_passdown if not.
1658  */
1659 static void disable_passdown_if_not_supported(struct pool_c *pt)
1660 {
1661         struct pool *pool = pt->pool;
1662         struct block_device *data_bdev = pt->data_dev->bdev;
1663         struct queue_limits *data_limits = &bdev_get_queue(data_bdev)->limits;
1664         sector_t block_size = pool->sectors_per_block << SECTOR_SHIFT;
1665         const char *reason = NULL;
1666         char buf[BDEVNAME_SIZE];
1667
1668         if (!pt->adjusted_pf.discard_passdown)
1669                 return;
1670
1671         if (!data_dev_supports_discard(pt))
1672                 reason = "discard unsupported";
1673
1674         else if (data_limits->max_discard_sectors < pool->sectors_per_block)
1675                 reason = "max discard sectors smaller than a block";
1676
1677         else if (data_limits->discard_granularity > block_size)
1678                 reason = "discard granularity larger than a block";
1679
1680         else if (!is_factor(block_size, data_limits->discard_granularity))
1681                 reason = "discard granularity not a factor of block size";
1682
1683         if (reason) {
1684                 DMWARN("Data device (%s) %s: Disabling discard passdown.", bdevname(data_bdev, buf), reason);
1685                 pt->adjusted_pf.discard_passdown = false;
1686         }
1687 }
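/*
 * Illustrative example (values invented for this sketch, not taken from
 * the code above): with 128-sector (64KB) pool blocks, a data device
 * advertising max_discard_sectors of 8 gets passdown disabled ("max
 * discard sectors smaller than a block"), whereas one advertising
 * max_discard_sectors >= 128 with a 4096-byte discard_granularity passes
 * every check, since 4096 is a factor of the 65536-byte block size.
 */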
1688
1689 static int bind_control_target(struct pool *pool, struct dm_target *ti)
1690 {
1691         struct pool_c *pt = ti->private;
1692
1693         /*
1694          * We want to make sure that a pool in PM_FAIL mode is never upgraded.
1695          */
1696         enum pool_mode old_mode = pool->pf.mode;
1697         enum pool_mode new_mode = pt->adjusted_pf.mode;
1698
1699         /*
1700          * Don't change the pool's mode until set_pool_mode() below.
1701          * Otherwise the pool's process_* function pointers may
1702          * not match the desired pool mode.
1703          */
1704         pt->adjusted_pf.mode = old_mode;
1705
1706         pool->ti = ti;
1707         pool->pf = pt->adjusted_pf;
1708         pool->low_water_blocks = pt->low_water_blocks;
1709
1710         /*
1711          * If we were in PM_FAIL mode, rollback of metadata failed.  We're
1712          * not going to recover without a thin_repair.  So we never let the
1713          * pool move out of the old mode.  On the other hand, PM_READ_ONLY
1714          * may have been entered due to a lack of metadata or data space,
1715          * and may now work again (e.g. if the underlying devices have been resized).
1716          */
1717         if (old_mode == PM_FAIL)
1718                 new_mode = old_mode;
1719
1720         set_pool_mode(pool, new_mode);
1721
1722         return 0;
1723 }
1724
1725 static void unbind_control_target(struct pool *pool, struct dm_target *ti)
1726 {
1727         if (pool->ti == ti)
1728                 pool->ti = NULL;
1729 }
1730
1731 /*----------------------------------------------------------------
1732  * Pool creation
1733  *--------------------------------------------------------------*/
1734 /* Initialize pool features. */
1735 static void pool_features_init(struct pool_features *pf)
1736 {
1737         pf->mode = PM_WRITE;
1738         pf->zero_new_blocks = true;
1739         pf->discard_enabled = true;
1740         pf->discard_passdown = true;
1741         pf->error_if_no_space = false;
1742 }
1743
1744 static void __pool_destroy(struct pool *pool)
1745 {
1746         __pool_table_remove(pool);
1747
1748         if (dm_pool_metadata_close(pool->pmd) < 0)
1749                 DMWARN("%s: dm_pool_metadata_close() failed.", __func__);
1750
1751         dm_bio_prison_destroy(pool->prison);
1752         dm_kcopyd_client_destroy(pool->copier);
1753
1754         if (pool->wq)
1755                 destroy_workqueue(pool->wq);
1756
1757         if (pool->next_mapping)
1758                 mempool_free(pool->next_mapping, pool->mapping_pool);
1759         mempool_destroy(pool->mapping_pool);
1760         dm_deferred_set_destroy(pool->shared_read_ds);
1761         dm_deferred_set_destroy(pool->all_io_ds);
1762         kfree(pool);
1763 }
1764
1765 static struct kmem_cache *_new_mapping_cache;
1766
1767 static struct pool *pool_create(struct mapped_device *pool_md,
1768                                 struct block_device *metadata_dev,
1769                                 unsigned long block_size,
1770                                 int read_only, char **error)
1771 {
1772         int r;
1773         void *err_p;
1774         struct pool *pool;
1775         struct dm_pool_metadata *pmd;
1776         bool format_device = !read_only;
1777
1778         pmd = dm_pool_metadata_open(metadata_dev, block_size, format_device);
1779         if (IS_ERR(pmd)) {
1780                 *error = "Error creating metadata object";
1781                 return (struct pool *)pmd;
1782         }
1783
1784         pool = kmalloc(sizeof(*pool), GFP_KERNEL);
1785         if (!pool) {
1786                 *error = "Error allocating memory for pool";
1787                 err_p = ERR_PTR(-ENOMEM);
1788                 goto bad_pool;
1789         }
1790
1791         pool->pmd = pmd;
1792         pool->sectors_per_block = block_size;
1793         if (block_size & (block_size - 1))
1794                 pool->sectors_per_block_shift = -1;
1795         else
1796                 pool->sectors_per_block_shift = __ffs(block_size);
1797         pool->low_water_blocks = 0;
1798         pool_features_init(&pool->pf);
1799         pool->prison = dm_bio_prison_create(PRISON_CELLS);
1800         if (!pool->prison) {
1801                 *error = "Error creating pool's bio prison";
1802                 err_p = ERR_PTR(-ENOMEM);
1803                 goto bad_prison;
1804         }
1805
1806         pool->copier = dm_kcopyd_client_create(&dm_kcopyd_throttle);
1807         if (IS_ERR(pool->copier)) {
1808                 r = PTR_ERR(pool->copier);
1809                 *error = "Error creating pool's kcopyd client";
1810                 err_p = ERR_PTR(r);
1811                 goto bad_kcopyd_client;
1812         }
1813
1814         /*
1815          * Create a single-threaded workqueue that will service all devices
1816          * that use this metadata.
1817          */
1818         pool->wq = alloc_ordered_workqueue("dm-" DM_MSG_PREFIX, WQ_MEM_RECLAIM);
1819         if (!pool->wq) {
1820                 *error = "Error creating pool's workqueue";
1821                 err_p = ERR_PTR(-ENOMEM);
1822                 goto bad_wq;
1823         }
1824
1825         INIT_WORK(&pool->worker, do_worker);
1826         INIT_DELAYED_WORK(&pool->waker, do_waker);
1827         spin_lock_init(&pool->lock);
1828         bio_list_init(&pool->deferred_bios);
1829         bio_list_init(&pool->deferred_flush_bios);
1830         INIT_LIST_HEAD(&pool->prepared_mappings);
1831         INIT_LIST_HEAD(&pool->prepared_discards);
1832         pool->low_water_triggered = false;
1833         bio_list_init(&pool->retry_on_resume_list);
1834
1835         pool->shared_read_ds = dm_deferred_set_create();
1836         if (!pool->shared_read_ds) {
1837                 *error = "Error creating pool's shared read deferred set";
1838                 err_p = ERR_PTR(-ENOMEM);
1839                 goto bad_shared_read_ds;
1840         }
1841
1842         pool->all_io_ds = dm_deferred_set_create();
1843         if (!pool->all_io_ds) {
1844                 *error = "Error creating pool's all io deferred set";
1845                 err_p = ERR_PTR(-ENOMEM);
1846                 goto bad_all_io_ds;
1847         }
1848
1849         pool->next_mapping = NULL;
1850         pool->mapping_pool = mempool_create_slab_pool(MAPPING_POOL_SIZE,
1851                                                       _new_mapping_cache);
1852         if (!pool->mapping_pool) {
1853                 *error = "Error creating pool's mapping mempool";
1854                 err_p = ERR_PTR(-ENOMEM);
1855                 goto bad_mapping_pool;
1856         }
1857
1858         pool->ref_count = 1;
1859         pool->last_commit_jiffies = jiffies;
1860         pool->pool_md = pool_md;
1861         pool->md_dev = metadata_dev;
1862         __pool_table_insert(pool);
1863
1864         return pool;
1865
1866 bad_mapping_pool:
1867         dm_deferred_set_destroy(pool->all_io_ds);
1868 bad_all_io_ds:
1869         dm_deferred_set_destroy(pool->shared_read_ds);
1870 bad_shared_read_ds:
1871         destroy_workqueue(pool->wq);
1872 bad_wq:
1873         dm_kcopyd_client_destroy(pool->copier);
1874 bad_kcopyd_client:
1875         dm_bio_prison_destroy(pool->prison);
1876 bad_prison:
1877         kfree(pool);
1878 bad_pool:
1879         if (dm_pool_metadata_close(pmd))
1880                 DMWARN("%s: dm_pool_metadata_close() failed.", __func__);
1881
1882         return err_p;
1883 }
1884
1885 static void __pool_inc(struct pool *pool)
1886 {
1887         BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));
1888         pool->ref_count++;
1889 }
1890
1891 static void __pool_dec(struct pool *pool)
1892 {
1893         BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));
1894         BUG_ON(!pool->ref_count);
1895         if (!--pool->ref_count)
1896                 __pool_destroy(pool);
1897 }
1898
1899 static struct pool *__pool_find(struct mapped_device *pool_md,
1900                                 struct block_device *metadata_dev,
1901                                 unsigned long block_size, int read_only,
1902                                 char **error, int *created)
1903 {
1904         struct pool *pool = __pool_table_lookup_metadata_dev(metadata_dev);
1905
1906         if (pool) {
1907                 if (pool->pool_md != pool_md) {
1908                         *error = "metadata device already in use by a pool";
1909                         return ERR_PTR(-EBUSY);
1910                 }
1911                 __pool_inc(pool);
1912
1913         } else {
1914                 pool = __pool_table_lookup(pool_md);
1915                 if (pool) {
1916                         if (pool->md_dev != metadata_dev) {
1917                                 *error = "different pool cannot replace a pool";
1918                                 return ERR_PTR(-EINVAL);
1919                         }
1920                         __pool_inc(pool);
1921
1922                 } else {
1923                         pool = pool_create(pool_md, metadata_dev, block_size, read_only, error);
1924                         *created = 1;
1925                 }
1926         }
1927
1928         return pool;
1929 }
1930
1931 /*----------------------------------------------------------------
1932  * Pool target methods
1933  *--------------------------------------------------------------*/
1934 static void pool_dtr(struct dm_target *ti)
1935 {
1936         struct pool_c *pt = ti->private;
1937
1938         mutex_lock(&dm_thin_pool_table.mutex);
1939
1940         unbind_control_target(pt->pool, ti);
1941         __pool_dec(pt->pool);
1942         dm_put_device(ti, pt->metadata_dev);
1943         dm_put_device(ti, pt->data_dev);
1944         kfree(pt);
1945
1946         mutex_unlock(&dm_thin_pool_table.mutex);
1947 }
1948
1949 static int parse_pool_features(struct dm_arg_set *as, struct pool_features *pf,
1950                                struct dm_target *ti)
1951 {
1952         int r;
1953         unsigned argc;
1954         const char *arg_name;
1955
1956         static struct dm_arg _args[] = {
1957                 {0, 4, "Invalid number of pool feature arguments"},
1958         };
1959
1960         /*
1961          * No feature arguments supplied.
1962          */
1963         if (!as->argc)
1964                 return 0;
1965
1966         r = dm_read_arg_group(_args, as, &argc, &ti->error);
1967         if (r)
1968                 return -EINVAL;
1969
1970         while (argc && !r) {
1971                 arg_name = dm_shift_arg(as);
1972                 argc--;
1973
1974                 if (!strcasecmp(arg_name, "skip_block_zeroing"))
1975                         pf->zero_new_blocks = false;
1976
1977                 else if (!strcasecmp(arg_name, "ignore_discard"))
1978                         pf->discard_enabled = false;
1979
1980                 else if (!strcasecmp(arg_name, "no_discard_passdown"))
1981                         pf->discard_passdown = false;
1982
1983                 else if (!strcasecmp(arg_name, "read_only"))
1984                         pf->mode = PM_READ_ONLY;
1985
1986                 else if (!strcasecmp(arg_name, "error_if_no_space"))
1987                         pf->error_if_no_space = true;
1988
1989                 else {
1990                         ti->error = "Unrecognised pool feature requested";
1991                         r = -EINVAL;
1992                         break;
1993                 }
1994         }
1995
1996         return r;
1997 }
1998
1999 static void metadata_low_callback(void *context)
2000 {
2001         struct pool *pool = context;
2002
2003         DMWARN("%s: reached low water mark for metadata device: sending event.",
2004                dm_device_name(pool->pool_md));
2005
2006         dm_table_event(pool->ti->table);
2007 }
2008
2009 static sector_t get_dev_size(struct block_device *bdev)
2010 {
2011         return i_size_read(bdev->bd_inode) >> SECTOR_SHIFT;
2012 }
2013
2014 static void warn_if_metadata_device_too_big(struct block_device *bdev)
2015 {
2016         sector_t metadata_dev_size = get_dev_size(bdev);
2017         char buffer[BDEVNAME_SIZE];
2018
2019         if (metadata_dev_size > THIN_METADATA_MAX_SECTORS_WARNING)
2020                 DMWARN("Metadata device %s is larger than %u sectors: excess space will not be used.",
2021                        bdevname(bdev, buffer), THIN_METADATA_MAX_SECTORS);
2022 }
2023
2024 static sector_t get_metadata_dev_size(struct block_device *bdev)
2025 {
2026         sector_t metadata_dev_size = get_dev_size(bdev);
2027
2028         if (metadata_dev_size > THIN_METADATA_MAX_SECTORS)
2029                 metadata_dev_size = THIN_METADATA_MAX_SECTORS;
2030
2031         return metadata_dev_size;
2032 }
2033
2034 static dm_block_t get_metadata_dev_size_in_blocks(struct block_device *bdev)
2035 {
2036         sector_t metadata_dev_size = get_metadata_dev_size(bdev);
2037
2038         sector_div(metadata_dev_size, THIN_METADATA_BLOCK_SIZE);
2039
2040         return metadata_dev_size;
2041 }
2042
2043 /*
2044  * When a metadata threshold is crossed a dm event is triggered, and
2045  * userland should respond by growing the metadata device.  We could let
2046  * userland set the threshold, like we do with the data threshold, but I'm
2047  * not sure they know enough to do this well.
2048  */
2049 static dm_block_t calc_metadata_threshold(struct pool_c *pt)
2050 {
2051         /*
2052          * 4M is ample for all ops, with the possible exception of thin
2053          * device deletion, which is harmless if it fails (just retry the
2054          * delete after you've grown the device).
2055          */
2056         dm_block_t quarter = get_metadata_dev_size_in_blocks(pt->metadata_dev->bdev) / 4;
2057         return min((dm_block_t)1024ULL /* 4M */, quarter);
2058 }
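/*
 * Worked example (illustrative numbers, assuming the 4KB metadata block
 * size implied by the "1024 == 4M" comment above): a 1GB metadata device
 * holds 262144 blocks, a quarter of which is 65536, so the threshold is
 * min(1024, 65536) = 1024 blocks and the event fires once 4M of metadata
 * space remains.  Only a metadata device smaller than 16M would yield a
 * lower threshold.
 */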
2059
2060 /*
2061  * thin-pool <metadata dev> <data dev>
2062  *           <data block size (sectors)>
2063  *           <low water mark (blocks)>
2064  *           [<#feature args> [<arg>]*]
2065  *
2066  * Optional feature arguments are:
2067  *           skip_block_zeroing: skip the zeroing of newly-provisioned blocks
2068  *           ignore_discard: disable discard support
2069  *           no_discard_passdown: don't pass discards down to the data device
2070  *           read_only: don't allow any changes to be made to the pool metadata
2071  *           error_if_no_space: error IOs, instead of queueing them, if no space
2072  */
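/*
 * An example table line (hypothetical device paths and sizes, purely for
 * illustration): a pool with metadata on /dev/sdb and 10GB of data on
 * /dev/sdc, 64KB (128-sector) blocks, a low water mark of 1024 blocks
 * and one feature argument:
 *
 *   0 20971520 thin-pool /dev/sdb /dev/sdc 128 1024 1 skip_block_zeroing
 */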
2073 static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv)
2074 {
2075         int r, pool_created = 0;
2076         struct pool_c *pt;
2077         struct pool *pool;
2078         struct pool_features pf;
2079         struct dm_arg_set as;
2080         struct dm_dev *data_dev;
2081         unsigned long block_size;
2082         dm_block_t low_water_blocks;
2083         struct dm_dev *metadata_dev;
2084         fmode_t metadata_mode;
2085
2086         /*
2087          * FIXME Remove validation from scope of lock.
2088          */
2089         mutex_lock(&dm_thin_pool_table.mutex);
2090
2091         if (argc < 4) {
2092                 ti->error = "Invalid argument count";
2093                 r = -EINVAL;
2094                 goto out_unlock;
2095         }
2096
2097         as.argc = argc;
2098         as.argv = argv;
2099
2100         /*
2101          * Set default pool features.
2102          */
2103         pool_features_init(&pf);
2104
2105         dm_consume_args(&as, 4);
2106         r = parse_pool_features(&as, &pf, ti);
2107         if (r)
2108                 goto out_unlock;
2109
2110         metadata_mode = FMODE_READ | ((pf.mode == PM_READ_ONLY) ? 0 : FMODE_WRITE);
2111         r = dm_get_device(ti, argv[0], metadata_mode, &metadata_dev);
2112         if (r) {
2113                 ti->error = "Error opening metadata block device";
2114                 goto out_unlock;
2115         }
2116         warn_if_metadata_device_too_big(metadata_dev->bdev);
2117
2118         r = dm_get_device(ti, argv[1], FMODE_READ | FMODE_WRITE, &data_dev);
2119         if (r) {
2120                 ti->error = "Error getting data device";
2121                 goto out_metadata;
2122         }
2123
2124         if (kstrtoul(argv[2], 10, &block_size) || !block_size ||
2125             block_size < DATA_DEV_BLOCK_SIZE_MIN_SECTORS ||
2126             block_size > DATA_DEV_BLOCK_SIZE_MAX_SECTORS ||
2127             block_size & (DATA_DEV_BLOCK_SIZE_MIN_SECTORS - 1)) {
2128                 ti->error = "Invalid block size";
2129                 r = -EINVAL;
2130                 goto out;
2131         }
2132
2133         if (kstrtoull(argv[3], 10, (unsigned long long *)&low_water_blocks)) {
2134                 ti->error = "Invalid low water mark";
2135                 r = -EINVAL;
2136                 goto out;
2137         }
2138
2139         pt = kzalloc(sizeof(*pt), GFP_KERNEL);
2140         if (!pt) {
2141                 r = -ENOMEM;
2142                 goto out;
2143         }
2144
2145         pool = __pool_find(dm_table_get_md(ti->table), metadata_dev->bdev,
2146                            block_size, pf.mode == PM_READ_ONLY, &ti->error, &pool_created);
2147         if (IS_ERR(pool)) {
2148                 r = PTR_ERR(pool);
2149                 goto out_free_pt;
2150         }
2151
2152         /*
2153          * 'pool_created' reflects whether this is the first table load.
2154          * Top-level discard support is not allowed to change after the
2155          * initial load, since changing it would require reloading all the
2156          * thin devices to pick up the change.
2157          */
2158         if (!pool_created && pf.discard_enabled != pool->pf.discard_enabled) {
2159                 ti->error = "Discard support cannot be disabled once enabled";
2160                 r = -EINVAL;
2161                 goto out_flags_changed;
2162         }
2163
2164         pt->pool = pool;
2165         pt->ti = ti;
2166         pt->metadata_dev = metadata_dev;
2167         pt->data_dev = data_dev;
2168         pt->low_water_blocks = low_water_blocks;
2169         pt->adjusted_pf = pt->requested_pf = pf;
2170         ti->num_flush_bios = 1;
2171
2172         /*
2173          * Only need to enable discards if the pool should pass
2174          * them down to the data device.  The thin device's discard
2175          * processing will cause mappings to be removed from the btree.
2176          */
2177         ti->discard_zeroes_data_unsupported = true;
2178         if (pf.discard_enabled && pf.discard_passdown) {
2179                 ti->num_discard_bios = 1;
2180
2181                 /*
2182                  * Setting 'discards_supported' circumvents the normal
2183                  * stacking of discard limits (this keeps the pool and
2184                  * thin devices' discard limits consistent).
2185                  */
2186                 ti->discards_supported = true;
2187         }
2188         ti->private = pt;
2189
2190         r = dm_pool_register_metadata_threshold(pt->pool->pmd,
2191                                                 calc_metadata_threshold(pt),
2192                                                 metadata_low_callback,
2193                                                 pool);
2194         if (r)
2195                 goto out_flags_changed;
2196
2197         pt->callbacks.congested_fn = pool_is_congested;
2198         dm_table_add_target_callbacks(ti->table, &pt->callbacks);
2199
2200         mutex_unlock(&dm_thin_pool_table.mutex);
2201
2202         return 0;
2203
2204 out_flags_changed:
2205         __pool_dec(pool);
2206 out_free_pt:
2207         kfree(pt);
2208 out:
2209         dm_put_device(ti, data_dev);
2210 out_metadata:
2211         dm_put_device(ti, metadata_dev);
2212 out_unlock:
2213         mutex_unlock(&dm_thin_pool_table.mutex);
2214
2215         return r;
2216 }
2217
2218 static int pool_map(struct dm_target *ti, struct bio *bio)
2219 {
2220         int r;
2221         struct pool_c *pt = ti->private;
2222         struct pool *pool = pt->pool;
2223         unsigned long flags;
2224
2225         /*
2226          * As this is a singleton target, ti->begin is always zero.
2227          */
2228         spin_lock_irqsave(&pool->lock, flags);
2229         bio->bi_bdev = pt->data_dev->bdev;
2230         r = DM_MAPIO_REMAPPED;
2231         spin_unlock_irqrestore(&pool->lock, flags);
2232
2233         return r;
2234 }
2235
2236 static int maybe_resize_data_dev(struct dm_target *ti, bool *need_commit)
2237 {
2238         int r;
2239         struct pool_c *pt = ti->private;
2240         struct pool *pool = pt->pool;
2241         sector_t data_size = ti->len;
2242         dm_block_t sb_data_size;
2243
2244         *need_commit = false;
2245
2246         (void) sector_div(data_size, pool->sectors_per_block);
2247
2248         r = dm_pool_get_data_dev_size(pool->pmd, &sb_data_size);
2249         if (r) {
2250                 DMERR("%s: failed to retrieve data device size",
2251                       dm_device_name(pool->pool_md));
2252                 return r;
2253         }
2254
2255         if (data_size < sb_data_size) {
2256                 DMERR("%s: pool target (%llu blocks) too small: expected %llu",
2257                       dm_device_name(pool->pool_md),
2258                       (unsigned long long)data_size, sb_data_size);
2259                 return -EINVAL;
2260
2261         } else if (data_size > sb_data_size) {
2262                 if (sb_data_size)
2263                         DMINFO("%s: growing the data device from %llu to %llu blocks",
2264                                dm_device_name(pool->pool_md),
2265                                sb_data_size, (unsigned long long)data_size);
2266                 r = dm_pool_resize_data_dev(pool->pmd, data_size);
2267                 if (r) {
2268                         metadata_operation_failed(pool, "dm_pool_resize_data_dev", r);
2269                         return r;
2270                 }
2271
2272                 *need_commit = true;
2273         }
2274
2275         return 0;
2276 }
2277
2278 static int maybe_resize_metadata_dev(struct dm_target *ti, bool *need_commit)
2279 {
2280         int r;
2281         struct pool_c *pt = ti->private;
2282         struct pool *pool = pt->pool;
2283         dm_block_t metadata_dev_size, sb_metadata_dev_size;
2284
2285         *need_commit = false;
2286
2287         metadata_dev_size = get_metadata_dev_size_in_blocks(pool->md_dev);
2288
2289         r = dm_pool_get_metadata_dev_size(pool->pmd, &sb_metadata_dev_size);
2290         if (r) {
2291                 DMERR("%s: failed to retrieve metadata device size",
2292                       dm_device_name(pool->pool_md));
2293                 return r;
2294         }
2295
2296         if (metadata_dev_size < sb_metadata_dev_size) {
2297                 DMERR("%s: metadata device (%llu blocks) too small: expected %llu",
2298                       dm_device_name(pool->pool_md),
2299                       metadata_dev_size, sb_metadata_dev_size);
2300                 return -EINVAL;
2301
2302         } else if (metadata_dev_size > sb_metadata_dev_size) {
2303                 warn_if_metadata_device_too_big(pool->md_dev);
2304                 DMINFO("%s: growing the metadata device from %llu to %llu blocks",
2305                        dm_device_name(pool->pool_md),
2306                        sb_metadata_dev_size, metadata_dev_size);
2307                 r = dm_pool_resize_metadata_dev(pool->pmd, metadata_dev_size);
2308                 if (r) {
2309                         metadata_operation_failed(pool, "dm_pool_resize_metadata_dev", r);
2310                         return r;
2311                 }
2312
2313                 *need_commit = true;
2314         }
2315
2316         return 0;
2317 }
2318
2319 /*
2320  * Retrieves the number of blocks of the data device from
2321  * the superblock and compares it to the actual device size,
2322  * thus resizing the data device in case it has grown.
2323  *
2324  * This copes both with opening a preallocated data device in the ctr
2325  * followed by a resume
2326  * -and-
2327  * with calling the resume method individually after userspace has
2328  * grown the data device in reaction to a table event.
2329  */
2330 static int pool_preresume(struct dm_target *ti)
2331 {
2332         int r;
2333         bool need_commit1, need_commit2;
2334         struct pool_c *pt = ti->private;
2335         struct pool *pool = pt->pool;
2336
2337         /*
2338          * Take control of the pool object.
2339          */
2340         r = bind_control_target(pool, ti);
2341         if (r)
2342                 return r;
2343
2344         r = maybe_resize_data_dev(ti, &need_commit1);
2345         if (r)
2346                 return r;
2347
2348         r = maybe_resize_metadata_dev(ti, &need_commit2);
2349         if (r)
2350                 return r;
2351
2352         if (need_commit1 || need_commit2)
2353                 (void) commit(pool);
2354
2355         return 0;
2356 }
2357
2358 static void pool_resume(struct dm_target *ti)
2359 {
2360         struct pool_c *pt = ti->private;
2361         struct pool *pool = pt->pool;
2362         unsigned long flags;
2363
2364         spin_lock_irqsave(&pool->lock, flags);
2365         pool->low_water_triggered = false;
2366         __requeue_bios(pool);
2367         spin_unlock_irqrestore(&pool->lock, flags);
2368
2369         do_waker(&pool->waker.work);
2370 }
2371
2372 static void pool_postsuspend(struct dm_target *ti)
2373 {
2374         struct pool_c *pt = ti->private;
2375         struct pool *pool = pt->pool;
2376
2377         cancel_delayed_work(&pool->waker);
2378         flush_workqueue(pool->wq);
2379         (void) commit(pool);
2380 }
2381
2382 static int check_arg_count(unsigned argc, unsigned args_required)
2383 {
2384         if (argc != args_required) {
2385                 DMWARN("Message received with %u arguments instead of %u.",
2386                        argc, args_required);
2387                 return -EINVAL;
2388         }
2389
2390         return 0;
2391 }
2392
2393 static int read_dev_id(char *arg, dm_thin_id *dev_id, int warning)
2394 {
2395         if (!kstrtoull(arg, 10, (unsigned long long *)dev_id) &&
2396             *dev_id <= MAX_DEV_ID)
2397                 return 0;
2398
2399         if (warning)
2400                 DMWARN("Message received with invalid device id: %s", arg);
2401
2402         return -EINVAL;
2403 }
2404
2405 static int process_create_thin_mesg(unsigned argc, char **argv, struct pool *pool)
2406 {
2407         dm_thin_id dev_id;
2408         int r;
2409
2410         r = check_arg_count(argc, 2);
2411         if (r)
2412                 return r;
2413
2414         r = read_dev_id(argv[1], &dev_id, 1);
2415         if (r)
2416                 return r;
2417
2418         r = dm_pool_create_thin(pool->pmd, dev_id);
2419         if (r) {
2420                 DMWARN("Creation of new thinly-provisioned device with id %s failed.",
2421                        argv[1]);
2422                 return r;
2423         }
2424
2425         return 0;
2426 }
2427
2428 static int process_create_snap_mesg(unsigned argc, char **argv, struct pool *pool)
2429 {
2430         dm_thin_id dev_id;
2431         dm_thin_id origin_dev_id;
2432         int r;
2433
2434         r = check_arg_count(argc, 3);
2435         if (r)
2436                 return r;
2437
2438         r = read_dev_id(argv[1], &dev_id, 1);
2439         if (r)
2440                 return r;
2441
2442         r = read_dev_id(argv[2], &origin_dev_id, 1);
2443         if (r)
2444                 return r;
2445
2446         r = dm_pool_create_snap(pool->pmd, dev_id, origin_dev_id);
2447         if (r) {
2448                 DMWARN("Creation of new snapshot %s of device %s failed.",
2449                        argv[1], argv[2]);
2450                 return r;
2451         }
2452
2453         return 0;
2454 }
2455
2456 static int process_delete_mesg(unsigned argc, char **argv, struct pool *pool)
2457 {
2458         dm_thin_id dev_id;
2459         int r;
2460
2461         r = check_arg_count(argc, 2);
2462         if (r)
2463                 return r;
2464
2465         r = read_dev_id(argv[1], &dev_id, 1);
2466         if (r)
2467                 return r;
2468
2469         r = dm_pool_delete_thin_device(pool->pmd, dev_id);
2470         if (r)
2471                 DMWARN("Deletion of thin device %s failed.", argv[1]);
2472
2473         return r;
2474 }
2475
2476 static int process_set_transaction_id_mesg(unsigned argc, char **argv, struct pool *pool)
2477 {
2478         dm_thin_id old_id, new_id;
2479         int r;
2480
2481         r = check_arg_count(argc, 3);
2482         if (r)
2483                 return r;
2484
2485         if (kstrtoull(argv[1], 10, (unsigned long long *)&old_id)) {
2486                 DMWARN("set_transaction_id message: Unrecognised id %s.", argv[1]);
2487                 return -EINVAL;
2488         }
2489
2490         if (kstrtoull(argv[2], 10, (unsigned long long *)&new_id)) {
2491                 DMWARN("set_transaction_id message: Unrecognised new id %s.", argv[2]);
2492                 return -EINVAL;
2493         }
2494
2495         r = dm_pool_set_metadata_transaction_id(pool->pmd, old_id, new_id);
2496         if (r) {
2497                 DMWARN("Failed to change transaction id from %s to %s.",
2498                        argv[1], argv[2]);
2499                 return r;
2500         }
2501
2502         return 0;
2503 }
2504
2505 static int process_reserve_metadata_snap_mesg(unsigned argc, char **argv, struct pool *pool)
2506 {
2507         int r;
2508
2509         r = check_arg_count(argc, 1);
2510         if (r)
2511                 return r;
2512
2513         (void) commit(pool);
2514
2515         r = dm_pool_reserve_metadata_snap(pool->pmd);
2516         if (r)
2517                 DMWARN("reserve_metadata_snap message failed.");
2518
2519         return r;
2520 }
2521
2522 static int process_release_metadata_snap_mesg(unsigned argc, char **argv, struct pool *pool)
2523 {
2524         int r;
2525
2526         r = check_arg_count(argc, 1);
2527         if (r)
2528                 return r;
2529
2530         r = dm_pool_release_metadata_snap(pool->pmd);
2531         if (r)
2532                 DMWARN("release_metadata_snap message failed.");
2533
2534         return r;
2535 }
2536
2537 /*
2538  * Messages supported:
2539  *   create_thin        <dev_id>
2540  *   create_snap        <dev_id> <origin_id>
2541  *   delete             <dev_id>
2543  *   set_transaction_id <current_trans_id> <new_trans_id>
2544  *   reserve_metadata_snap
2545  *   release_metadata_snap
2546  */
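/*
 * Example usage from userspace via dmsetup (the pool device name is
 * hypothetical, for illustration only):
 *
 *   dmsetup message /dev/mapper/my_pool 0 create_thin 0
 *   dmsetup message /dev/mapper/my_pool 0 create_snap 1 0
 *   dmsetup message /dev/mapper/my_pool 0 delete 1
 */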
2547 static int pool_message(struct dm_target *ti, unsigned argc, char **argv)
2548 {
2549         int r = -EINVAL;
2550         struct pool_c *pt = ti->private;
2551         struct pool *pool = pt->pool;
2552
2553         if (!strcasecmp(argv[0], "create_thin"))
2554                 r = process_create_thin_mesg(argc, argv, pool);
2555
2556         else if (!strcasecmp(argv[0], "create_snap"))
2557                 r = process_create_snap_mesg(argc, argv, pool);
2558
2559         else if (!strcasecmp(argv[0], "delete"))
2560                 r = process_delete_mesg(argc, argv, pool);
2561
2562         else if (!strcasecmp(argv[0], "set_transaction_id"))
2563                 r = process_set_transaction_id_mesg(argc, argv, pool);
2564
2565         else if (!strcasecmp(argv[0], "reserve_metadata_snap"))
2566                 r = process_reserve_metadata_snap_mesg(argc, argv, pool);
2567
2568         else if (!strcasecmp(argv[0], "release_metadata_snap"))
2569                 r = process_release_metadata_snap_mesg(argc, argv, pool);
2570
2571         else
2572                 DMWARN("Unrecognised thin pool target message received: %s", argv[0]);
2573
2574         if (!r)
2575                 (void) commit(pool);
2576
2577         return r;
2578 }
2579
2580 static void emit_flags(struct pool_features *pf, char *result,
2581                        unsigned sz, unsigned maxlen)
2582 {
2583         unsigned count = !pf->zero_new_blocks + !pf->discard_enabled +
2584                 !pf->discard_passdown + (pf->mode == PM_READ_ONLY) +
2585                 pf->error_if_no_space;
2586         DMEMIT("%u ", count);
2587
2588         if (!pf->zero_new_blocks)
2589                 DMEMIT("skip_block_zeroing ");
2590
2591         if (!pf->discard_enabled)
2592                 DMEMIT("ignore_discard ");
2593
2594         if (!pf->discard_passdown)
2595                 DMEMIT("no_discard_passdown ");
2596
2597         if (pf->mode == PM_READ_ONLY)
2598                 DMEMIT("read_only ");
2599
2600         if (pf->error_if_no_space)
2601                 DMEMIT("error_if_no_space ");
2602 }
2603
2604 /*
2605  * Status line is:
2606  *    <transaction id> <used metadata blocks>/<total metadata blocks>
2607  *    <used data blocks>/<total data blocks> <held metadata root>
2608  */
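/*
 * A hypothetical STATUSTYPE_INFO line (the numbers are invented, for
 * illustration only):
 *
 *   0 152/4096 663/1048576 - rw discard_passdown queue_if_no_space
 *
 * i.e. transaction id 0, 152 of 4096 metadata blocks used, 663 of
 * 1048576 data blocks used, no held metadata root, read-write mode,
 * discards passed down, and IO queued rather than errored when space
 * runs out.
 */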
2609 static void pool_status(struct dm_target *ti, status_type_t type,
2610                         unsigned status_flags, char *result, unsigned maxlen)
2611 {
2612         int r;
2613         unsigned sz = 0;
2614         uint64_t transaction_id;
2615         dm_block_t nr_free_blocks_data;
2616         dm_block_t nr_free_blocks_metadata;
2617         dm_block_t nr_blocks_data;
2618         dm_block_t nr_blocks_metadata;
2619         dm_block_t held_root;
2620         char buf[BDEVNAME_SIZE];
2621         char buf2[BDEVNAME_SIZE];
2622         struct pool_c *pt = ti->private;
2623         struct pool *pool = pt->pool;
2624
2625         switch (type) {
2626         case STATUSTYPE_INFO:
2627                 if (get_pool_mode(pool) == PM_FAIL) {
2628                         DMEMIT("Fail");
2629                         break;
2630                 }
2631
2632                 /* Commit to ensure statistics aren't out-of-date */
2633                 if (!(status_flags & DM_STATUS_NOFLUSH_FLAG) && !dm_suspended(ti))
2634                         (void) commit(pool);
2635
2636                 r = dm_pool_get_metadata_transaction_id(pool->pmd, &transaction_id);
2637                 if (r) {
2638                         DMERR("%s: dm_pool_get_metadata_transaction_id returned %d",
2639                               dm_device_name(pool->pool_md), r);
2640                         goto err;
2641                 }
2642
2643                 r = dm_pool_get_free_metadata_block_count(pool->pmd, &nr_free_blocks_metadata);
2644                 if (r) {
2645                         DMERR("%s: dm_pool_get_free_metadata_block_count returned %d",
2646                               dm_device_name(pool->pool_md), r);
2647                         goto err;
2648                 }
2649
2650                 r = dm_pool_get_metadata_dev_size(pool->pmd, &nr_blocks_metadata);
2651                 if (r) {
2652                         DMERR("%s: dm_pool_get_metadata_dev_size returned %d",
2653                               dm_device_name(pool->pool_md), r);
2654                         goto err;
2655                 }
2656
2657                 r = dm_pool_get_free_block_count(pool->pmd, &nr_free_blocks_data);
2658                 if (r) {
2659                         DMERR("%s: dm_pool_get_free_block_count returned %d",
2660                               dm_device_name(pool->pool_md), r);
2661                         goto err;
2662                 }
2663
2664                 r = dm_pool_get_data_dev_size(pool->pmd, &nr_blocks_data);
2665                 if (r) {
2666                         DMERR("%s: dm_pool_get_data_dev_size returned %d",
2667                               dm_device_name(pool->pool_md), r);
2668                         goto err;
2669                 }
2670
2671                 r = dm_pool_get_metadata_snap(pool->pmd, &held_root);
2672                 if (r) {
2673                         DMERR("%s: dm_pool_get_metadata_snap returned %d",
2674                               dm_device_name(pool->pool_md), r);
2675                         goto err;
2676                 }
2677
2678                 DMEMIT("%llu %llu/%llu %llu/%llu ",
2679                        (unsigned long long)transaction_id,
2680                        (unsigned long long)(nr_blocks_metadata - nr_free_blocks_metadata),
2681                        (unsigned long long)nr_blocks_metadata,
2682                        (unsigned long long)(nr_blocks_data - nr_free_blocks_data),
2683                        (unsigned long long)nr_blocks_data);
2684
2685                 if (held_root)
2686                         DMEMIT("%llu ", held_root);
2687                 else
2688                         DMEMIT("- ");
2689
2690                 if (pool->pf.mode == PM_READ_ONLY)
2691                         DMEMIT("ro ");
2692                 else
2693                         DMEMIT("rw ");
2694
2695                 if (!pool->pf.discard_enabled)
2696                         DMEMIT("ignore_discard ");
2697                 else if (pool->pf.discard_passdown)
2698                         DMEMIT("discard_passdown ");
2699                 else
2700                         DMEMIT("no_discard_passdown ");
2701
2702                 if (pool->pf.error_if_no_space)
2703                         DMEMIT("error_if_no_space ");
2704                 else
2705                         DMEMIT("queue_if_no_space ");
2706
2707                 break;
2708
2709         case STATUSTYPE_TABLE:
2710                 DMEMIT("%s %s %lu %llu ",
2711                        format_dev_t(buf, pt->metadata_dev->bdev->bd_dev),
2712                        format_dev_t(buf2, pt->data_dev->bdev->bd_dev),
2713                        (unsigned long)pool->sectors_per_block,
2714                        (unsigned long long)pt->low_water_blocks);
2715                 emit_flags(&pt->requested_pf, result, sz, maxlen);
2716                 break;
2717         }
2718         return;
2719
2720 err:
2721         DMEMIT("Error");
2722 }
2723
2724 static int pool_iterate_devices(struct dm_target *ti,
2725                                 iterate_devices_callout_fn fn, void *data)
2726 {
2727         struct pool_c *pt = ti->private;
2728
2729         return fn(ti, pt->data_dev, 0, ti->len, data);
2730 }
2731
2732 static int pool_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
2733                       struct bio_vec *biovec, int max_size)
2734 {
2735         struct pool_c *pt = ti->private;
2736         struct request_queue *q = bdev_get_queue(pt->data_dev->bdev);
2737
2738         if (!q->merge_bvec_fn)
2739                 return max_size;
2740
2741         bvm->bi_bdev = pt->data_dev->bdev;
2742
2743         return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
2744 }
2745
2746 static void set_discard_limits(struct pool_c *pt, struct queue_limits *limits)
2747 {
2748         struct pool *pool = pt->pool;
2749         struct queue_limits *data_limits;
2750
2751         limits->max_discard_sectors = pool->sectors_per_block;
2752
2753         /*
2754          * discard_granularity is just a hint, and not enforced.
2755          */
2756         if (pt->adjusted_pf.discard_passdown) {
2757                 data_limits = &bdev_get_queue(pt->data_dev->bdev)->limits;
2758                 limits->discard_granularity = data_limits->discard_granularity;
2759         } else
2760                 limits->discard_granularity = pool->sectors_per_block << SECTOR_SHIFT;
2761 }
2762
2763 static void pool_io_hints(struct dm_target *ti, struct queue_limits *limits)
2764 {
2765         struct pool_c *pt = ti->private;
2766         struct pool *pool = pt->pool;
2767         uint64_t io_opt_sectors = limits->io_opt >> SECTOR_SHIFT;
2768
2769         /*
2770          * If the system-determined stacked limits are compatible with the
2771          * pool's blocksize (io_opt is a factor) do not override them.
2772          */
2773         if (io_opt_sectors < pool->sectors_per_block ||
2774             do_div(io_opt_sectors, pool->sectors_per_block)) {
2775                 blk_limits_io_min(limits, 0);
2776                 blk_limits_io_opt(limits, pool->sectors_per_block << SECTOR_SHIFT);
2777         }
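        /*
         * Worked example (illustrative values only, not from the original
         * source): with 128-sector (64KB) pool blocks, a stacked io_opt of
         * 768 sectors is left alone, since 768 % 128 == 0; a stacked io_opt
         * of 192 sectors would be overridden with the pool's block size,
         * since 192 % 128 == 64.
         */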
2778
2779         /*
2780          * pt->adjusted_pf is a staging area for the actual features to use.
2781          * They get transferred to the live pool in bind_control_target()
2782          * called from pool_preresume().
2783          */
2784         if (!pt->adjusted_pf.discard_enabled) {
2785                 /*
2786                  * Must explicitly disallow stacking discard limits, otherwise the
2787                  * block layer will stack them if the pool's data device has support.
2788                  * QUEUE_FLAG_DISCARD wouldn't be set, but there is no way for the
2789                  * user to see that, so make sure all discard limits are set to 0.
2790                  */
2791                 limits->discard_granularity = 0;
2792                 return;
2793         }
2794
2795         disable_passdown_if_not_supported(pt);
2796
2797         set_discard_limits(pt, limits);
2798 }
2799
2800 static struct target_type pool_target = {
2801         .name = "thin-pool",
2802         .features = DM_TARGET_SINGLETON | DM_TARGET_ALWAYS_WRITEABLE |
2803                     DM_TARGET_IMMUTABLE,
2804         .version = {1, 10, 0},
2805         .module = THIS_MODULE,
2806         .ctr = pool_ctr,
2807         .dtr = pool_dtr,
2808         .map = pool_map,
2809         .postsuspend = pool_postsuspend,
2810         .preresume = pool_preresume,
2811         .resume = pool_resume,
2812         .message = pool_message,
2813         .status = pool_status,
2814         .merge = pool_merge,
2815         .iterate_devices = pool_iterate_devices,
2816         .io_hints = pool_io_hints,
2817 };
2818
2819 /*----------------------------------------------------------------
2820  * Thin target methods
2821  *--------------------------------------------------------------*/
2822 static void thin_dtr(struct dm_target *ti)
2823 {
2824         struct thin_c *tc = ti->private;
2825
2826         mutex_lock(&dm_thin_pool_table.mutex);
2827
2828         __pool_dec(tc->pool);
2829         dm_pool_close_thin_device(tc->td);
2830         dm_put_device(ti, tc->pool_dev);
2831         if (tc->origin_dev)
2832                 dm_put_device(ti, tc->origin_dev);
2833         kfree(tc);
2834
2835         mutex_unlock(&dm_thin_pool_table.mutex);
2836 }
2837
2838 /*
2839  * Thin target parameters:
2840  *
2841  * <pool_dev> <dev_id> [origin_dev]
2842  *
2843  * pool_dev: the path to the pool (e.g. /dev/mapper/my_pool)
2844  * dev_id: the internal device identifier
2845  * origin_dev: a device external to the pool that should act as the origin
2846  *
2847  * If the pool device has discards disabled, they get disabled for the thin
2848  * device as well.
2849  */
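/*
 * An example table line (hypothetical names and sizes, for illustration
 * only): a 1GB thin device with internal id 0, backed by the pool above:
 *
 *   0 2097152 thin /dev/mapper/my_pool 0
 */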
2850 static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv)
2851 {
2852         int r;
2853         struct thin_c *tc;
2854         struct dm_dev *pool_dev, *origin_dev;
2855         struct mapped_device *pool_md;
2856
2857         mutex_lock(&dm_thin_pool_table.mutex);
2858
2859         if (argc != 2 && argc != 3) {
2860                 ti->error = "Invalid argument count";
2861                 r = -EINVAL;
2862                 goto out_unlock;
2863         }
2864
2865         tc = ti->private = kzalloc(sizeof(*tc), GFP_KERNEL);
2866         if (!tc) {
2867                 ti->error = "Out of memory";
2868                 r = -ENOMEM;
2869                 goto out_unlock;
2870         }
2871
2872         if (argc == 3) {
2873                 r = dm_get_device(ti, argv[2], FMODE_READ, &origin_dev);
2874                 if (r) {
2875                         ti->error = "Error opening origin device";
2876                         goto bad_origin_dev;
2877                 }
2878                 tc->origin_dev = origin_dev;
2879         }
2880
2881         r = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &pool_dev);
2882         if (r) {
2883                 ti->error = "Error opening pool device";
2884                 goto bad_pool_dev;
2885         }
2886         tc->pool_dev = pool_dev;
2887
2888         if (read_dev_id(argv[1], (unsigned long long *)&tc->dev_id, 0)) {
2889                 ti->error = "Invalid device id";
2890                 r = -EINVAL;
2891                 goto bad_common;
2892         }
2893
2894         pool_md = dm_get_md(tc->pool_dev->bdev->bd_dev);
2895         if (!pool_md) {
2896                 ti->error = "Couldn't get pool mapped device";
2897                 r = -EINVAL;
2898                 goto bad_common;
2899         }
2900
2901         tc->pool = __pool_table_lookup(pool_md);
2902         if (!tc->pool) {
2903                 ti->error = "Couldn't find pool object";
2904                 r = -EINVAL;
2905                 goto bad_pool_lookup;
2906         }
2907         __pool_inc(tc->pool);
2908
2909         if (get_pool_mode(tc->pool) == PM_FAIL) {
2910                 ti->error = "Couldn't open thin device: pool is in fail mode";
2911                 r = -EINVAL;
2912                 goto bad_thin_open;
2913         }
2914
2915         r = dm_pool_open_thin_device(tc->pool->pmd, tc->dev_id, &tc->td);
2916         if (r) {
2917                 ti->error = "Couldn't open thin internal device";
2918                 goto bad_thin_open;
2919         }
2920
2921         r = dm_set_target_max_io_len(ti, tc->pool->sectors_per_block);
2922         if (r)
2923                 goto bad_target_max_io_len;
2924
2925         ti->num_flush_bios = 1;
2926         ti->flush_supported = true;
2927         ti->per_bio_data_size = sizeof(struct dm_thin_endio_hook);
2928
2929         /* If the pool supports discards, pass them on. */
2930         ti->discard_zeroes_data_unsupported = true;
2931         if (tc->pool->pf.discard_enabled) {
2932                 ti->discards_supported = true;
2933                 ti->num_discard_bios = 1;
2934                 /* Discard bios must be split on a block boundary */
2935                 ti->split_discard_bios = true;
2936         }
2937
2938         dm_put(pool_md);
2939
2940         mutex_unlock(&dm_thin_pool_table.mutex);
2941
2942         return 0;
2943
2944 bad_target_max_io_len:
2945         dm_pool_close_thin_device(tc->td);
2946 bad_thin_open:
2947         __pool_dec(tc->pool);
2948 bad_pool_lookup:
2949         dm_put(pool_md);
2950 bad_common:
2951         dm_put_device(ti, tc->pool_dev);
2952 bad_pool_dev:
2953         if (tc->origin_dev)
2954                 dm_put_device(ti, tc->origin_dev);
2955 bad_origin_dev:
2956         kfree(tc);
2957 out_unlock:
2958         mutex_unlock(&dm_thin_pool_table.mutex);
2959
2960         return r;
2961 }
2962
2963 static int thin_map(struct dm_target *ti, struct bio *bio)
2964 {
2965         bio->bi_iter.bi_sector = dm_target_offset(ti, bio->bi_iter.bi_sector);
2966
2967         return thin_bio_map(ti, bio);
2968 }
2969
2970 static int thin_endio(struct dm_target *ti, struct bio *bio, int err)
2971 {
2972         unsigned long flags;
2973         struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
2974         struct list_head work;
2975         struct dm_thin_new_mapping *m, *tmp;
2976         struct pool *pool = h->tc->pool;
2977
2978         if (h->shared_read_entry) {
2979                 INIT_LIST_HEAD(&work);
2980                 dm_deferred_entry_dec(h->shared_read_entry, &work);
2981
2982                 spin_lock_irqsave(&pool->lock, flags);
2983                 list_for_each_entry_safe(m, tmp, &work, list) {
2984                         list_del(&m->list);
2985                         m->quiesced = true;
2986                         __maybe_add_mapping(m);
2987                 }
2988                 spin_unlock_irqrestore(&pool->lock, flags);
2989         }
2990
2991         if (h->all_io_entry) {
2992                 INIT_LIST_HEAD(&work);
2993                 dm_deferred_entry_dec(h->all_io_entry, &work);
2994                 if (!list_empty(&work)) {
2995                         spin_lock_irqsave(&pool->lock, flags);
2996                         list_for_each_entry_safe(m, tmp, &work, list)
2997                                 list_add_tail(&m->list, &pool->prepared_discards);
2998                         spin_unlock_irqrestore(&pool->lock, flags);
2999                         wake_worker(pool);
3000                 }
3001         }
3002
3003         return 0;
3004 }
3005
3006 static void thin_postsuspend(struct dm_target *ti)
3007 {
3008         if (dm_noflush_suspending(ti))
3009                 requeue_io((struct thin_c *)ti->private);
3010 }
3011
3012 /*
3013  * <nr mapped sectors> <highest mapped sector>
3014  */
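/*
 * For example (hypothetical values): a thin device with 100 contiguous
 * 128-sector blocks mapped from block 0 would report
 *
 *   12800 12799
 *
 * i.e. 12800 sectors mapped, with sector 12799 (the last sector of the
 * highest mapped block) being the highest mapped sector.
 */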
3015 static void thin_status(struct dm_target *ti, status_type_t type,
3016                         unsigned status_flags, char *result, unsigned maxlen)
3017 {
3018         int r;
3019         ssize_t sz = 0;
3020         dm_block_t mapped, highest;
3021         char buf[BDEVNAME_SIZE];
3022         struct thin_c *tc = ti->private;
3023
3024         if (get_pool_mode(tc->pool) == PM_FAIL) {
3025                 DMEMIT("Fail");
3026                 return;
3027         }
3028
3029         if (!tc->td)
3030                 DMEMIT("-");
3031         else {
3032                 switch (type) {
3033                 case STATUSTYPE_INFO:
3034                         r = dm_thin_get_mapped_count(tc->td, &mapped);
3035                         if (r) {
3036                                 DMERR("dm_thin_get_mapped_count returned %d", r);
3037                                 goto err;
3038                         }
3039
3040                         r = dm_thin_get_highest_mapped_block(tc->td, &highest);
3041                         if (r < 0) {
3042                                 DMERR("dm_thin_get_highest_mapped_block returned %d", r);
3043                                 goto err;
3044                         }
3045
3046                         DMEMIT("%llu ", mapped * tc->pool->sectors_per_block);
3047                         if (r)
3048                                 DMEMIT("%llu", ((highest + 1) *
3049                                                 tc->pool->sectors_per_block) - 1);
3050                         else
3051                                 DMEMIT("-");
3052                         break;
3053
3054                 case STATUSTYPE_TABLE:
3055                         DMEMIT("%s %lu",
3056                                format_dev_t(buf, tc->pool_dev->bdev->bd_dev),
3057                                (unsigned long) tc->dev_id);
3058                         if (tc->origin_dev)
3059                                 DMEMIT(" %s", format_dev_t(buf, tc->origin_dev->bdev->bd_dev));
3060                         break;
3061                 }
3062         }
3063
3064         return;
3065
3066 err:
3067         DMEMIT("Error");
3068 }
3069
3070 static int thin_iterate_devices(struct dm_target *ti,
3071                                 iterate_devices_callout_fn fn, void *data)
3072 {
3073         sector_t blocks;
3074         struct thin_c *tc = ti->private;
3075         struct pool *pool = tc->pool;
3076
3077         /*
3078          * We can't call dm_pool_get_data_dev_size() since that blocks.  So
3079          * we follow a more convoluted path through to the pool's target.
3080          */
3081         if (!pool->ti)
3082                 return 0;       /* nothing is bound */
3083
3084         blocks = pool->ti->len;
3085         (void) sector_div(blocks, pool->sectors_per_block);
3086         if (blocks)
3087                 return fn(ti, tc->pool_dev, 0, pool->sectors_per_block * blocks, data);
3088
3089         return 0;
3090 }
3091
3092 static struct target_type thin_target = {
3093         .name = "thin",
3094         .version = {1, 10, 0},
3095         .module = THIS_MODULE,
3096         .ctr = thin_ctr,
3097         .dtr = thin_dtr,
3098         .map = thin_map,
3099         .end_io = thin_endio,
3100         .postsuspend = thin_postsuspend,
3101         .status = thin_status,
3102         .iterate_devices = thin_iterate_devices,
3103 };
3104
3105 /*----------------------------------------------------------------*/
3106
3107 static int __init dm_thin_init(void)
3108 {
3109         int r;
3110
3111         pool_table_init();
3112
3113         r = dm_register_target(&thin_target);
3114         if (r)
3115                 return r;
3116
3117         r = dm_register_target(&pool_target);
3118         if (r)
3119                 goto bad_pool_target;
3120
3121         r = -ENOMEM;
3122
3123         _new_mapping_cache = KMEM_CACHE(dm_thin_new_mapping, 0);
3124         if (!_new_mapping_cache)
3125                 goto bad_new_mapping_cache;
3126
3127         return 0;
3128
3129 bad_new_mapping_cache:
3130         dm_unregister_target(&pool_target);
3131 bad_pool_target:
3132         dm_unregister_target(&thin_target);
3133
3134         return r;
3135 }
3136
3137 static void dm_thin_exit(void)
3138 {
3139         dm_unregister_target(&thin_target);
3140         dm_unregister_target(&pool_target);
3141
3142         kmem_cache_destroy(_new_mapping_cache);
3143 }
3144
3145 module_init(dm_thin_init);
3146 module_exit(dm_thin_exit);
3147
3148 MODULE_DESCRIPTION(DM_NAME " thin provisioning target");
3149 MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
3150 MODULE_LICENSE("GPL");