drivers/md/dm-thin.c
1 /*
2  * Copyright (C) 2011-2012 Red Hat UK.
3  *
4  * This file is released under the GPL.
5  */
6
7 #include "dm-thin-metadata.h"
8 #include "dm-bio-prison.h"
9 #include "dm.h"
10
11 #include <linux/device-mapper.h>
12 #include <linux/dm-io.h>
13 #include <linux/dm-kcopyd.h>
14 #include <linux/list.h>
15 #include <linux/init.h>
16 #include <linux/module.h>
17 #include <linux/slab.h>
18
19 #define DM_MSG_PREFIX   "thin"
20
21 /*
22  * Tunable constants
23  */
24 #define ENDIO_HOOK_POOL_SIZE 1024
25 #define MAPPING_POOL_SIZE 1024
26 #define PRISON_CELLS 1024
27 #define COMMIT_PERIOD HZ
28 #define NO_SPACE_TIMEOUT_SECS 60
29
30 static unsigned no_space_timeout_secs = NO_SPACE_TIMEOUT_SECS;
31
32 DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(snapshot_copy_throttle,
33                 "A percentage of time allocated for copy on write");
34
35 /*
36  * The block size of the device holding pool data must be
37  * between 64KB and 1GB.
38  */
39 #define DATA_DEV_BLOCK_SIZE_MIN_SECTORS (64 * 1024 >> SECTOR_SHIFT)
40 #define DATA_DEV_BLOCK_SIZE_MAX_SECTORS (1024 * 1024 * 1024 >> SECTOR_SHIFT)
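/* With 512-byte sectors (SECTOR_SHIFT == 9) these evaluate to 128 and 2097152 sectors. */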
41
42 /*
43  * Device id is restricted to 24 bits.
44  */
45 #define MAX_DEV_ID ((1 << 24) - 1)
46
47 /*
48  * How do we handle breaking sharing of data blocks?
49  * =================================================
50  *
51  * We use a standard copy-on-write btree to store the mappings for the
52  * devices (note I'm talking about copy-on-write of the metadata here, not
53  * the data).  When you take an internal snapshot you clone the root node
54  * of the origin btree.  After this there is no concept of an origin or a
55  * snapshot.  They are just two device trees that happen to point to the
56  * same data blocks.
57  *
58  * When we get a write in we decide if it's to a shared data block using
59  * some timestamp magic.  If it is, we have to break sharing.
60  *
61  * Let's say we write to a shared block in what was the origin.  The
62  * steps are:
63  *
64  * i) plug any further io to this physical block (see bio_prison code).
65  *
66  * ii) quiesce any read io to that shared data block, obviously
67  * including all devices that share this block (see dm_deferred_set code).
68  *
69  * iii) copy the data block to a newly allocated block.  This step can be
70  * skipped if the io covers the whole block (schedule_copy).
71  *
72  * iv) insert the new mapping into the origin's btree
73  * (process_prepared_mapping).  This act of inserting breaks some
74  * sharing of btree nodes between the two devices.  Breaking sharing only
75  * affects the btree of that specific device.  Btrees for the other
76  * devices that share the block never change.  The btree for the origin
77  * device as it was after the last commit is untouched, ie. we're using
78  * persistent data structures in the functional programming sense.
79  *
80  * v) unplug io to this physical block, including the io that triggered
81  * the breaking of sharing.
82  *
83  * Steps (ii) and (iii) occur in parallel.
84  *
85  * The metadata _doesn't_ need to be committed before the io continues.  We
86  * get away with this because the io is always written to a _new_ block.
87  * If there's a crash, then:
88  *
89  * - The origin mapping will point to the old origin block (the shared
90  * one).  This will contain the data as it was before the io that triggered
91  * the breaking of sharing came in.
92  *
93  * - The snap mapping still points to the old block, as it would after
94  * the commit.
95  *
96  * The downside of this scheme is that the timestamp magic isn't perfect, and
97  * will continue to think that the data block in the snapshot device is shared
98  * even after the write to the origin has broken sharing.  I suspect data
99  * blocks will typically be shared by many different devices, so we're
100  * breaking sharing n + 1 times, rather than n, where n is the number of
101  * devices that reference this data block.  At the moment I think the
102  * benefits far, far outweigh the disadvantages.
103  */
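
/*
 * In terms of the code below, the write path above corresponds roughly to:
 *
 *   process_bio() -> process_shared_bio() -> break_sharing()
 *     -> schedule_internal_copy()                      (steps i-iii)
 *   copy_complete() / overwrite_endio() -> process_prepared_mapping()
 *     -> dm_thin_insert_block() + cell_defer()         (steps iv-v)
 */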
104
105 /*----------------------------------------------------------------*/
106
107 /*
108  * Key building.
109  */
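
/*
 * Two flavours of key are used with the bio prison: data keys (virtual = 0)
 * identify a physical block in the pool's data device, while virtual keys
 * (virtual = 1) identify a logical block within a single thin device.
 */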
110 static void build_data_key(struct dm_thin_device *td,
111                            dm_block_t b, struct dm_cell_key *key)
112 {
113         key->virtual = 0;
114         key->dev = dm_thin_dev_id(td);
115         key->block = b;
116 }
117
118 static void build_virtual_key(struct dm_thin_device *td, dm_block_t b,
119                               struct dm_cell_key *key)
120 {
121         key->virtual = 1;
122         key->dev = dm_thin_dev_id(td);
123         key->block = b;
124 }
125
126 /*----------------------------------------------------------------*/
127
128 /*
129  * A pool device ties together a metadata device and a data device.  It
130  * also provides the interface for creating and destroying internal
131  * devices.
132  */
133 struct dm_thin_new_mapping;
134
135 /*
136  * The pool runs in 4 modes, ordered from least to most degraded so that modes can be compared.
137  */
138 enum pool_mode {
139         PM_WRITE,               /* metadata may be changed */
140         PM_OUT_OF_DATA_SPACE,   /* metadata may be changed, though data may not be allocated */
141         PM_READ_ONLY,           /* metadata may not be changed */
142         PM_FAIL,                /* all I/O fails */
143 };
144
145 struct pool_features {
146         enum pool_mode mode;
147
148         bool zero_new_blocks:1;
149         bool discard_enabled:1;
150         bool discard_passdown:1;
151         bool error_if_no_space:1;
152 };
153
154 struct thin_c;
155 typedef void (*process_bio_fn)(struct thin_c *tc, struct bio *bio);
156 typedef void (*process_mapping_fn)(struct dm_thin_new_mapping *m);
157
158 struct pool {
159         struct list_head list;
160         struct dm_target *ti;   /* Only set if a pool target is bound */
161
162         struct mapped_device *pool_md;
163         struct block_device *md_dev;
164         struct dm_pool_metadata *pmd;
165
166         dm_block_t low_water_blocks;
167         uint32_t sectors_per_block;
168         int sectors_per_block_shift;
169
170         struct pool_features pf;
171         bool low_water_triggered:1;     /* A dm event has been sent */
172
173         struct dm_bio_prison *prison;
174         struct dm_kcopyd_client *copier;
175
176         struct workqueue_struct *wq;
177         struct work_struct worker;
178         struct delayed_work waker;
179         struct delayed_work no_space_timeout;
180
181         unsigned long last_commit_jiffies;
182         unsigned ref_count;
183
184         spinlock_t lock;
185         struct bio_list deferred_bios;
186         struct bio_list deferred_flush_bios;
187         struct list_head prepared_mappings;
188         struct list_head prepared_discards;
189
190         struct bio_list retry_on_resume_list;
191
192         struct dm_deferred_set *shared_read_ds;
193         struct dm_deferred_set *all_io_ds;
194
195         struct dm_thin_new_mapping *next_mapping;
196         mempool_t *mapping_pool;
197
198         process_bio_fn process_bio;
199         process_bio_fn process_discard;
200
201         process_mapping_fn process_prepared_mapping;
202         process_mapping_fn process_prepared_discard;
203 };
204
205 static enum pool_mode get_pool_mode(struct pool *pool);
206 static void metadata_operation_failed(struct pool *pool, const char *op, int r);
207
208 /*
209  * Target context for a pool.
210  */
211 struct pool_c {
212         struct dm_target *ti;
213         struct pool *pool;
214         struct dm_dev *data_dev;
215         struct dm_dev *metadata_dev;
216         struct dm_target_callbacks callbacks;
217
218         dm_block_t low_water_blocks;
219         struct pool_features requested_pf; /* Features requested during table load */
220         struct pool_features adjusted_pf;  /* Features used after adjusting for constituent devices */
221 };
222
223 /*
224  * Target context for a thin.
225  */
226 struct thin_c {
227         struct dm_dev *pool_dev;
228         struct dm_dev *origin_dev;
229         dm_thin_id dev_id;
230
231         struct pool *pool;
232         struct dm_thin_device *td;
233         bool requeue_mode:1;
234 };
235
236 /*----------------------------------------------------------------*/
237
238 /*
239  * wake_worker() is used when new work is queued and when pool_resume is
240  * ready to continue deferred IO processing.
241  */
242 static void wake_worker(struct pool *pool)
243 {
244         queue_work(pool->wq, &pool->worker);
245 }
246
247 /*----------------------------------------------------------------*/
248
249 static int bio_detain(struct pool *pool, struct dm_cell_key *key, struct bio *bio,
250                       struct dm_bio_prison_cell **cell_result)
251 {
252         int r;
253         struct dm_bio_prison_cell *cell_prealloc;
254
255         /*
256          * Allocate a cell from the prison's mempool.
257          * This might block but it can't fail.
258          */
259         cell_prealloc = dm_bio_prison_alloc_cell(pool->prison, GFP_NOIO);
260
261         r = dm_bio_detain(pool->prison, key, bio, cell_prealloc, cell_result);
262         if (r)
263                 /*
264                  * We reused an old cell; we can get rid of
265                  * the new one.
266                  */
267                 dm_bio_prison_free_cell(pool->prison, cell_prealloc);
268
269         return r;
270 }
271
272 static void cell_release(struct pool *pool,
273                          struct dm_bio_prison_cell *cell,
274                          struct bio_list *bios)
275 {
276         dm_cell_release(pool->prison, cell, bios);
277         dm_bio_prison_free_cell(pool->prison, cell);
278 }
279
280 static void cell_release_no_holder(struct pool *pool,
281                                    struct dm_bio_prison_cell *cell,
282                                    struct bio_list *bios)
283 {
284         dm_cell_release_no_holder(pool->prison, cell, bios);
285         dm_bio_prison_free_cell(pool->prison, cell);
286 }
287
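/*
 * As cell_release_no_holder(), but the released bios are deferred to the
 * pool's worker and the cell itself is left for the caller to free.
 */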
288 static void cell_defer_no_holder_no_free(struct thin_c *tc,
289                                          struct dm_bio_prison_cell *cell)
290 {
291         struct pool *pool = tc->pool;
292         unsigned long flags;
293
294         spin_lock_irqsave(&pool->lock, flags);
295         dm_cell_release_no_holder(pool->prison, cell, &pool->deferred_bios);
296         spin_unlock_irqrestore(&pool->lock, flags);
297
298         wake_worker(pool);
299 }
300
301 static void cell_error(struct pool *pool,
302                        struct dm_bio_prison_cell *cell)
303 {
304         dm_cell_error(pool->prison, cell);
305         dm_bio_prison_free_cell(pool->prison, cell);
306 }
307
308 /*----------------------------------------------------------------*/
309
310 /*
311  * A global list of pools that uses a struct mapped_device as a key.
312  */
313 static struct dm_thin_pool_table {
314         struct mutex mutex;
315         struct list_head pools;
316 } dm_thin_pool_table;
317
318 static void pool_table_init(void)
319 {
320         mutex_init(&dm_thin_pool_table.mutex);
321         INIT_LIST_HEAD(&dm_thin_pool_table.pools);
322 }
323
324 static void __pool_table_insert(struct pool *pool)
325 {
326         BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));
327         list_add(&pool->list, &dm_thin_pool_table.pools);
328 }
329
330 static void __pool_table_remove(struct pool *pool)
331 {
332         BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));
333         list_del(&pool->list);
334 }
335
336 static struct pool *__pool_table_lookup(struct mapped_device *md)
337 {
338         struct pool *pool = NULL, *tmp;
339
340         BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));
341
342         list_for_each_entry(tmp, &dm_thin_pool_table.pools, list) {
343                 if (tmp->pool_md == md) {
344                         pool = tmp;
345                         break;
346                 }
347         }
348
349         return pool;
350 }
351
352 static struct pool *__pool_table_lookup_metadata_dev(struct block_device *md_dev)
353 {
354         struct pool *pool = NULL, *tmp;
355
356         BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));
357
358         list_for_each_entry(tmp, &dm_thin_pool_table.pools, list) {
359                 if (tmp->md_dev == md_dev) {
360                         pool = tmp;
361                         break;
362                 }
363         }
364
365         return pool;
366 }
367
368 /*----------------------------------------------------------------*/
369
370 struct dm_thin_endio_hook {
371         struct thin_c *tc;
372         struct dm_deferred_entry *shared_read_entry;
373         struct dm_deferred_entry *all_io_entry;
374         struct dm_thin_new_mapping *overwrite_mapping;
375 };
376
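/*
 * Complete, with DM_ENDIO_REQUEUE, any bios on @master that belong to this
 * thin device; bios belonging to other devices are put back on the list.
 */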
377 static void requeue_bio_list(struct thin_c *tc, struct bio_list *master)
378 {
379         struct bio *bio;
380         struct bio_list bios;
381         unsigned long flags;
382
383         bio_list_init(&bios);
384
385         spin_lock_irqsave(&tc->pool->lock, flags);
386         bio_list_merge(&bios, master);
387         bio_list_init(master);
388         spin_unlock_irqrestore(&tc->pool->lock, flags);
389
390         while ((bio = bio_list_pop(&bios))) {
391                 struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
392
393                 if (h->tc == tc)
394                         bio_endio(bio, DM_ENDIO_REQUEUE);
395                 else
396                         bio_list_add(master, bio);
397         }
398 }
399
400 static void requeue_io(struct thin_c *tc)
401 {
402         struct pool *pool = tc->pool;
403
404         requeue_bio_list(tc, &pool->deferred_bios);
405         requeue_bio_list(tc, &pool->retry_on_resume_list);
406 }
407
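/*
 * Fail every bio that was queued waiting for the pool to be resumed.
 */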
408 static void error_retry_list(struct pool *pool)
409 {
410         struct bio *bio;
411         unsigned long flags;
412         struct bio_list bios;
413
414         bio_list_init(&bios);
415
416         spin_lock_irqsave(&pool->lock, flags);
417         bio_list_merge(&bios, &pool->retry_on_resume_list);
418         bio_list_init(&pool->retry_on_resume_list);
419         spin_unlock_irqrestore(&pool->lock, flags);
420
421         while ((bio = bio_list_pop(&bios)))
422                 bio_io_error(bio);
423 }
424
425 /*
426  * This section of code contains the logic for processing a thin device's IO.
427  * Much of the code depends on pool object resources (lists, workqueues, etc)
428  * but most is exclusively called from the thin target rather than the thin-pool
429  * target.
430  */
431
432 static bool block_size_is_power_of_two(struct pool *pool)
433 {
434         return pool->sectors_per_block_shift >= 0;
435 }
436
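/*
 * Convert the bio's starting sector into a pool block number: a shift if
 * the block size is a power of two, otherwise a sector_div().
 */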
437 static dm_block_t get_bio_block(struct thin_c *tc, struct bio *bio)
438 {
439         struct pool *pool = tc->pool;
440         sector_t block_nr = bio->bi_iter.bi_sector;
441
442         if (block_size_is_power_of_two(pool))
443                 block_nr >>= pool->sectors_per_block_shift;
444         else
445                 (void) sector_div(block_nr, pool->sectors_per_block);
446
447         return block_nr;
448 }
449
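/*
 * Redirect a bio to the pool's data device, translating the block number
 * into an absolute sector while preserving the offset within the block:
 * e.g. with 1024 sectors per block, block 5 at a 7 sector offset maps to
 * sector 5127.
 */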
450 static void remap(struct thin_c *tc, struct bio *bio, dm_block_t block)
451 {
452         struct pool *pool = tc->pool;
453         sector_t bi_sector = bio->bi_iter.bi_sector;
454
455         bio->bi_bdev = tc->pool_dev->bdev;
456         if (block_size_is_power_of_two(pool))
457                 bio->bi_iter.bi_sector =
458                         (block << pool->sectors_per_block_shift) |
459                         (bi_sector & (pool->sectors_per_block - 1));
460         else
461                 bio->bi_iter.bi_sector = (block * pool->sectors_per_block) +
462                                  sector_div(bi_sector, pool->sectors_per_block);
463 }
464
465 static void remap_to_origin(struct thin_c *tc, struct bio *bio)
466 {
467         bio->bi_bdev = tc->origin_dev->bdev;
468 }
469
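/*
 * A FLUSH/FUA bio only needs to trigger a metadata commit if the current
 * transaction actually contains uncommitted changes.
 */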
470 static int bio_triggers_commit(struct thin_c *tc, struct bio *bio)
471 {
472         return (bio->bi_rw & (REQ_FLUSH | REQ_FUA)) &&
473                 dm_thin_changed_this_transaction(tc->td);
474 }
475
476 static void inc_all_io_entry(struct pool *pool, struct bio *bio)
477 {
478         struct dm_thin_endio_hook *h;
479
480         if (bio->bi_rw & REQ_DISCARD)
481                 return;
482
483         h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
484         h->all_io_entry = dm_deferred_entry_inc(pool->all_io_ds);
485 }
486
487 static void issue(struct thin_c *tc, struct bio *bio)
488 {
489         struct pool *pool = tc->pool;
490         unsigned long flags;
491
492         if (!bio_triggers_commit(tc, bio)) {
493                 generic_make_request(bio);
494                 return;
495         }
496
497         /*
498          * Complete bio with an error if earlier I/O caused changes to
499          * the metadata that can't be committed e.g, due to I/O errors
500          * on the metadata device.
501          */
502         if (dm_thin_aborted_changes(tc->td)) {
503                 bio_io_error(bio);
504                 return;
505         }
506
507         /*
508          * Batch together any bios that trigger commits and then issue a
509          * single commit for them in process_deferred_bios().
510          */
511         spin_lock_irqsave(&pool->lock, flags);
512         bio_list_add(&pool->deferred_flush_bios, bio);
513         spin_unlock_irqrestore(&pool->lock, flags);
514 }
515
516 static void remap_to_origin_and_issue(struct thin_c *tc, struct bio *bio)
517 {
518         remap_to_origin(tc, bio);
519         issue(tc, bio);
520 }
521
522 static void remap_and_issue(struct thin_c *tc, struct bio *bio,
523                             dm_block_t block)
524 {
525         remap(tc, bio, block);
526         issue(tc, bio);
527 }
528
529 /*----------------------------------------------------------------*/
530
531 /*
532  * Bio endio functions.
533  */
534 struct dm_thin_new_mapping {
535         struct list_head list;
536
537         bool quiesced:1;
538         bool prepared:1;
539         bool pass_discard:1;
540         bool definitely_not_shared:1;
541
542         int err;
543         struct thin_c *tc;
544         dm_block_t virt_block;
545         dm_block_t data_block;
546         struct dm_bio_prison_cell *cell, *cell2;
547
548         /*
549          * If the bio covers the whole area of a block then we can avoid
550          * zeroing or copying.  Instead this bio is hooked.  The bio will
551          * still be in the cell, so care has to be taken to avoid issuing
552          * the bio twice.
553          */
554         struct bio *bio;
555         bio_end_io_t *saved_bi_end_io;
556 };
557
558 static void __maybe_add_mapping(struct dm_thin_new_mapping *m)
559 {
560         struct pool *pool = m->tc->pool;
561
562         if (m->quiesced && m->prepared) {
563                 list_add_tail(&m->list, &pool->prepared_mappings);
564                 wake_worker(pool);
565         }
566 }
567
568 static void copy_complete(int read_err, unsigned long write_err, void *context)
569 {
570         unsigned long flags;
571         struct dm_thin_new_mapping *m = context;
572         struct pool *pool = m->tc->pool;
573
574         m->err = read_err || write_err ? -EIO : 0;
575
576         spin_lock_irqsave(&pool->lock, flags);
577         m->prepared = true;
578         __maybe_add_mapping(m);
579         spin_unlock_irqrestore(&pool->lock, flags);
580 }
581
582 static void overwrite_endio(struct bio *bio, int err)
583 {
584         unsigned long flags;
585         struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
586         struct dm_thin_new_mapping *m = h->overwrite_mapping;
587         struct pool *pool = m->tc->pool;
588
589         m->err = err;
590
591         spin_lock_irqsave(&pool->lock, flags);
592         m->prepared = true;
593         __maybe_add_mapping(m);
594         spin_unlock_irqrestore(&pool->lock, flags);
595 }
596
597 /*----------------------------------------------------------------*/
598
599 /*
600  * Workqueue.
601  */
602
603 /*
604  * Prepared mapping jobs.
605  */
606
607 /*
608  * This sends the bios in the cell back to the deferred_bios list.
609  */
610 static void cell_defer(struct thin_c *tc, struct dm_bio_prison_cell *cell)
611 {
612         struct pool *pool = tc->pool;
613         unsigned long flags;
614
615         spin_lock_irqsave(&pool->lock, flags);
616         cell_release(pool, cell, &pool->deferred_bios);
617         spin_unlock_irqrestore(&pool->lock, flags);
618
619         wake_worker(pool);
620 }
621
622 /*
623  * Same as cell_defer above, except it omits the original holder of the cell.
624  */
625 static void cell_defer_no_holder(struct thin_c *tc, struct dm_bio_prison_cell *cell)
626 {
627         struct pool *pool = tc->pool;
628         unsigned long flags;
629
630         spin_lock_irqsave(&pool->lock, flags);
631         cell_release_no_holder(pool, cell, &pool->deferred_bios);
632         spin_unlock_irqrestore(&pool->lock, flags);
633
634         wake_worker(pool);
635 }
636
637 static void process_prepared_mapping_fail(struct dm_thin_new_mapping *m)
638 {
639         if (m->bio) {
640                 m->bio->bi_end_io = m->saved_bi_end_io;
641                 atomic_inc(&m->bio->bi_remaining);
642         }
643         cell_error(m->tc->pool, m->cell);
644         list_del(&m->list);
645         mempool_free(m, m->tc->pool->mapping_pool);
646 }
647
648 static void process_prepared_mapping(struct dm_thin_new_mapping *m)
649 {
650         struct thin_c *tc = m->tc;
651         struct pool *pool = tc->pool;
652         struct bio *bio;
653         int r;
654
655         bio = m->bio;
656         if (bio) {
657                 bio->bi_end_io = m->saved_bi_end_io;
658                 atomic_inc(&bio->bi_remaining);
659         }
660
661         if (m->err) {
662                 cell_error(pool, m->cell);
663                 goto out;
664         }
665
666         /*
667          * Commit the prepared block into the mapping btree.
668          * Any I/O for this block arriving after this point will get
669          * remapped to it directly.
670          */
671         r = dm_thin_insert_block(tc->td, m->virt_block, m->data_block);
672         if (r) {
673                 metadata_operation_failed(pool, "dm_thin_insert_block", r);
674                 cell_error(pool, m->cell);
675                 goto out;
676         }
677
678         /*
679          * Release any bios held while the block was being provisioned.
680          * If we are processing a write bio that completely covers the block,
681  * we have already processed it, so we can ignore it now when processing
682          * the bios in the cell.
683          */
684         if (bio) {
685                 cell_defer_no_holder(tc, m->cell);
686                 bio_endio(bio, 0);
687         } else
688                 cell_defer(tc, m->cell);
689
690 out:
691         list_del(&m->list);
692         mempool_free(m, pool->mapping_pool);
693 }
694
695 static void process_prepared_discard_fail(struct dm_thin_new_mapping *m)
696 {
697         struct thin_c *tc = m->tc;
698
699         bio_io_error(m->bio);
700         cell_defer_no_holder(tc, m->cell);
701         cell_defer_no_holder(tc, m->cell2);
702         mempool_free(m, tc->pool->mapping_pool);
703 }
704
705 static void process_prepared_discard_passdown(struct dm_thin_new_mapping *m)
706 {
707         struct thin_c *tc = m->tc;
708
709         inc_all_io_entry(tc->pool, m->bio);
710         cell_defer_no_holder(tc, m->cell);
711         cell_defer_no_holder(tc, m->cell2);
712
713         if (m->pass_discard) {
714                 if (m->definitely_not_shared)
715                         remap_and_issue(tc, m->bio, m->data_block);
716                 else {
717                         bool used = false;
718                         if (dm_pool_block_is_used(tc->pool->pmd, m->data_block, &used) || used)
719                                 bio_endio(m->bio, 0);
720                         else
721                                 remap_and_issue(tc, m->bio, m->data_block);
722                 }
723         } else
724                 bio_endio(m->bio, 0);
725
726         mempool_free(m, tc->pool->mapping_pool);
727 }
728
729 static void process_prepared_discard(struct dm_thin_new_mapping *m)
730 {
731         int r;
732         struct thin_c *tc = m->tc;
733
734         r = dm_thin_remove_block(tc->td, m->virt_block);
735         if (r)
736                 DMERR_LIMIT("dm_thin_remove_block() failed");
737
738         process_prepared_discard_passdown(m);
739 }
740
741 static void process_prepared(struct pool *pool, struct list_head *head,
742                              process_mapping_fn *fn)
743 {
744         unsigned long flags;
745         struct list_head maps;
746         struct dm_thin_new_mapping *m, *tmp;
747
748         INIT_LIST_HEAD(&maps);
749         spin_lock_irqsave(&pool->lock, flags);
750         list_splice_init(head, &maps);
751         spin_unlock_irqrestore(&pool->lock, flags);
752
753         list_for_each_entry_safe(m, tmp, &maps, list)
754                 (*fn)(m);
755 }
756
757 /*
758  * Deferred bio jobs.
759  */
760 static int io_overlaps_block(struct pool *pool, struct bio *bio)
761 {
762         return bio->bi_iter.bi_size ==
763                 (pool->sectors_per_block << SECTOR_SHIFT);
764 }
765
766 static int io_overwrites_block(struct pool *pool, struct bio *bio)
767 {
768         return (bio_data_dir(bio) == WRITE) &&
769                 io_overlaps_block(pool, bio);
770 }
771
772 static void save_and_set_endio(struct bio *bio, bio_end_io_t **save,
773                                bio_end_io_t *fn)
774 {
775         *save = bio->bi_end_io;
776         bio->bi_end_io = fn;
777 }
778
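/*
 * A single dm_thin_new_mapping is kept preallocated so the worker can
 * check that a mapping will be available before it starts processing a
 * bio; get_next_mapping() hands it out and clears the slot.
 */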
779 static int ensure_next_mapping(struct pool *pool)
780 {
781         if (pool->next_mapping)
782                 return 0;
783
784         pool->next_mapping = mempool_alloc(pool->mapping_pool, GFP_ATOMIC);
785
786         return pool->next_mapping ? 0 : -ENOMEM;
787 }
788
789 static struct dm_thin_new_mapping *get_next_mapping(struct pool *pool)
790 {
791         struct dm_thin_new_mapping *m = pool->next_mapping;
792
793         BUG_ON(!pool->next_mapping);
794
795         memset(m, 0, sizeof(struct dm_thin_new_mapping));
796         INIT_LIST_HEAD(&m->list);
797         m->bio = NULL;
798
799         pool->next_mapping = NULL;
800
801         return m;
802 }
803
804 static void schedule_copy(struct thin_c *tc, dm_block_t virt_block,
805                           struct dm_dev *origin, dm_block_t data_origin,
806                           dm_block_t data_dest,
807                           struct dm_bio_prison_cell *cell, struct bio *bio)
808 {
809         int r;
810         struct pool *pool = tc->pool;
811         struct dm_thin_new_mapping *m = get_next_mapping(pool);
812
813         m->tc = tc;
814         m->virt_block = virt_block;
815         m->data_block = data_dest;
816         m->cell = cell;
817
818         if (!dm_deferred_set_add_work(pool->shared_read_ds, &m->list))
819                 m->quiesced = true;
820
821         /*
822          * IO to pool_dev remaps to the pool target's data_dev.
823          *
824          * If the whole block of data is being overwritten, we can issue the
825          * bio immediately. Otherwise we use kcopyd to clone the data first.
826          */
827         if (io_overwrites_block(pool, bio)) {
828                 struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
829
830                 h->overwrite_mapping = m;
831                 m->bio = bio;
832                 save_and_set_endio(bio, &m->saved_bi_end_io, overwrite_endio);
833                 inc_all_io_entry(pool, bio);
834                 remap_and_issue(tc, bio, data_dest);
835         } else {
836                 struct dm_io_region from, to;
837
838                 from.bdev = origin->bdev;
839                 from.sector = data_origin * pool->sectors_per_block;
840                 from.count = pool->sectors_per_block;
841
842                 to.bdev = tc->pool_dev->bdev;
843                 to.sector = data_dest * pool->sectors_per_block;
844                 to.count = pool->sectors_per_block;
845
846                 r = dm_kcopyd_copy(pool->copier, &from, 1, &to,
847                                    0, copy_complete, m);
848                 if (r < 0) {
849                         mempool_free(m, pool->mapping_pool);
850                         DMERR_LIMIT("dm_kcopyd_copy() failed");
851                         cell_error(pool, cell);
852                 }
853         }
854 }
855
856 static void schedule_internal_copy(struct thin_c *tc, dm_block_t virt_block,
857                                    dm_block_t data_origin, dm_block_t data_dest,
858                                    struct dm_bio_prison_cell *cell, struct bio *bio)
859 {
860         schedule_copy(tc, virt_block, tc->pool_dev,
861                       data_origin, data_dest, cell, bio);
862 }
863
864 static void schedule_external_copy(struct thin_c *tc, dm_block_t virt_block,
865                                    dm_block_t data_dest,
866                                    struct dm_bio_prison_cell *cell, struct bio *bio)
867 {
868         schedule_copy(tc, virt_block, tc->origin_dev,
869                       virt_block, data_dest, cell, bio);
870 }
871
872 static void schedule_zero(struct thin_c *tc, dm_block_t virt_block,
873                           dm_block_t data_block, struct dm_bio_prison_cell *cell,
874                           struct bio *bio)
875 {
876         struct pool *pool = tc->pool;
877         struct dm_thin_new_mapping *m = get_next_mapping(pool);
878
879         m->quiesced = true;
880         m->prepared = false;
881         m->tc = tc;
882         m->virt_block = virt_block;
883         m->data_block = data_block;
884         m->cell = cell;
885
886         /*
887          * If the whole block of data is being overwritten or we are not
888          * zeroing pre-existing data, we can issue the bio immediately.
889          * Otherwise we use kcopyd to zero the data first.
890          */
891         if (!pool->pf.zero_new_blocks)
892                 process_prepared_mapping(m);
893
894         else if (io_overwrites_block(pool, bio)) {
895                 struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
896
897                 h->overwrite_mapping = m;
898                 m->bio = bio;
899                 save_and_set_endio(bio, &m->saved_bi_end_io, overwrite_endio);
900                 inc_all_io_entry(pool, bio);
901                 remap_and_issue(tc, bio, data_block);
902         } else {
903                 int r;
904                 struct dm_io_region to;
905
906                 to.bdev = tc->pool_dev->bdev;
907                 to.sector = data_block * pool->sectors_per_block;
908                 to.count = pool->sectors_per_block;
909
910                 r = dm_kcopyd_zero(pool->copier, 1, &to, 0, copy_complete, m);
911                 if (r < 0) {
912                         mempool_free(m, pool->mapping_pool);
913                         DMERR_LIMIT("dm_kcopyd_zero() failed");
914                         cell_error(pool, cell);
915                 }
916         }
917 }
918
919 /*
920  * A non-zero return indicates read_only or fail_io mode.
921  * Many callers don't care about the return value.
922  */
923 static int commit(struct pool *pool)
924 {
925         int r;
926
927         if (get_pool_mode(pool) >= PM_READ_ONLY)
928                 return -EINVAL;
929
930         r = dm_pool_commit_metadata(pool->pmd);
931         if (r)
932                 metadata_operation_failed(pool, "dm_pool_commit_metadata", r);
933
934         return r;
935 }
936
937 static void check_low_water_mark(struct pool *pool, dm_block_t free_blocks)
938 {
939         unsigned long flags;
940
941         if (free_blocks <= pool->low_water_blocks && !pool->low_water_triggered) {
942                 DMWARN("%s: reached low water mark for data device: sending event.",
943                        dm_device_name(pool->pool_md));
944                 spin_lock_irqsave(&pool->lock, flags);
945                 pool->low_water_triggered = true;
946                 spin_unlock_irqrestore(&pool->lock, flags);
947                 dm_table_event(pool->ti->table);
948         }
949 }
950
951 static void set_pool_mode(struct pool *pool, enum pool_mode new_mode);
952
953 static int alloc_data_block(struct thin_c *tc, dm_block_t *result)
954 {
955         int r;
956         dm_block_t free_blocks;
957         struct pool *pool = tc->pool;
958
959         if (WARN_ON(get_pool_mode(pool) != PM_WRITE))
960                 return -EINVAL;
961
962         r = dm_pool_get_free_block_count(pool->pmd, &free_blocks);
963         if (r) {
964                 metadata_operation_failed(pool, "dm_pool_get_free_block_count", r);
965                 return r;
966         }
967
968         check_low_water_mark(pool, free_blocks);
969
970         if (!free_blocks) {
971                 /*
972                  * Try to commit to see if that will free up some
973                  * more space.
974                  */
975                 r = commit(pool);
976                 if (r)
977                         return r;
978
979                 r = dm_pool_get_free_block_count(pool->pmd, &free_blocks);
980                 if (r) {
981                         metadata_operation_failed(pool, "dm_pool_get_free_block_count", r);
982                         return r;
983                 }
984
985                 if (!free_blocks) {
986                         set_pool_mode(pool, PM_OUT_OF_DATA_SPACE);
987                         return -ENOSPC;
988                 }
989         }
990
991         r = dm_pool_alloc_data_block(pool->pmd, result);
992         if (r) {
993                 metadata_operation_failed(pool, "dm_pool_alloc_data_block", r);
994                 return r;
995         }
996
997         return 0;
998 }
999
1000 /*
1001  * If we have run out of space, queue bios until the device is
1002  * resumed, presumably after having been reloaded with more space.
1003  */
1004 static void retry_on_resume(struct bio *bio)
1005 {
1006         struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
1007         struct thin_c *tc = h->tc;
1008         struct pool *pool = tc->pool;
1009         unsigned long flags;
1010
1011         spin_lock_irqsave(&pool->lock, flags);
1012         bio_list_add(&pool->retry_on_resume_list, bio);
1013         spin_unlock_irqrestore(&pool->lock, flags);
1014 }
1015
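/*
 * Decide whether a bio that cannot currently be serviced should be failed
 * immediately or queued for retry when the pool is resumed.
 */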
1016 static bool should_error_unserviceable_bio(struct pool *pool)
1017 {
1018         enum pool_mode m = get_pool_mode(pool);
1019
1020         switch (m) {
1021         case PM_WRITE:
1022                 /* Shouldn't get here */
1023                 DMERR_LIMIT("bio unserviceable, yet pool is in PM_WRITE mode");
1024                 return true;
1025
1026         case PM_OUT_OF_DATA_SPACE:
1027                 return pool->pf.error_if_no_space;
1028
1029         case PM_READ_ONLY:
1030         case PM_FAIL:
1031                 return true;
1032         default:
1033                 /* Shouldn't get here */
1034                 DMERR_LIMIT("bio unserviceable, yet pool has an unknown mode");
1035                 return true;
1036         }
1037 }
1038
1039 static void handle_unserviceable_bio(struct pool *pool, struct bio *bio)
1040 {
1041         if (should_error_unserviceable_bio(pool))
1042                 bio_io_error(bio);
1043         else
1044                 retry_on_resume(bio);
1045 }
1046
1047 static void retry_bios_on_resume(struct pool *pool, struct dm_bio_prison_cell *cell)
1048 {
1049         struct bio *bio;
1050         struct bio_list bios;
1051
1052         if (should_error_unserviceable_bio(pool)) {
1053                 cell_error(pool, cell);
1054                 return;
1055         }
1056
1057         bio_list_init(&bios);
1058         cell_release(pool, cell, &bios);
1059
1060         if (should_error_unserviceable_bio(pool))
1061                 while ((bio = bio_list_pop(&bios)))
1062                         bio_io_error(bio);
1063         else
1064                 while ((bio = bio_list_pop(&bios)))
1065                         retry_on_resume(bio);
1066 }
1067
1068 static void process_discard(struct thin_c *tc, struct bio *bio)
1069 {
1070         int r;
1071         unsigned long flags;
1072         struct pool *pool = tc->pool;
1073         struct dm_bio_prison_cell *cell, *cell2;
1074         struct dm_cell_key key, key2;
1075         dm_block_t block = get_bio_block(tc, bio);
1076         struct dm_thin_lookup_result lookup_result;
1077         struct dm_thin_new_mapping *m;
1078
1079         build_virtual_key(tc->td, block, &key);
1080         if (bio_detain(tc->pool, &key, bio, &cell))
1081                 return;
1082
1083         r = dm_thin_find_block(tc->td, block, 1, &lookup_result);
1084         switch (r) {
1085         case 0:
1086                 /*
1087                  * Check nobody is fiddling with this pool block.  This can
1088                  * happen if someone's in the process of breaking sharing
1089                  * on this block.
1090                  */
1091                 build_data_key(tc->td, lookup_result.block, &key2);
1092                 if (bio_detain(tc->pool, &key2, bio, &cell2)) {
1093                         cell_defer_no_holder(tc, cell);
1094                         break;
1095                 }
1096
1097                 if (io_overlaps_block(pool, bio)) {
1098                         /*
1099                          * IO may still be going to the destination block.  We must
1100                          * quiesce before we can do the removal.
1101                          */
1102                         m = get_next_mapping(pool);
1103                         m->tc = tc;
1104                         m->pass_discard = pool->pf.discard_passdown;
1105                         m->definitely_not_shared = !lookup_result.shared;
1106                         m->virt_block = block;
1107                         m->data_block = lookup_result.block;
1108                         m->cell = cell;
1109                         m->cell2 = cell2;
1110                         m->bio = bio;
1111
1112                         if (!dm_deferred_set_add_work(pool->all_io_ds, &m->list)) {
1113                                 spin_lock_irqsave(&pool->lock, flags);
1114                                 list_add_tail(&m->list, &pool->prepared_discards);
1115                                 spin_unlock_irqrestore(&pool->lock, flags);
1116                                 wake_worker(pool);
1117                         }
1118                 } else {
1119                         inc_all_io_entry(pool, bio);
1120                         cell_defer_no_holder(tc, cell);
1121                         cell_defer_no_holder(tc, cell2);
1122
1123                         /*
1124                          * The DM core makes sure that the discard doesn't span
1125                          * a block boundary.  So we submit the discard of a
1126                          * partial block appropriately.
1127                          */
1128                         if ((!lookup_result.shared) && pool->pf.discard_passdown)
1129                                 remap_and_issue(tc, bio, lookup_result.block);
1130                         else
1131                                 bio_endio(bio, 0);
1132                 }
1133                 break;
1134
1135         case -ENODATA:
1136                 /*
1137                  * It isn't provisioned, just forget it.
1138                  */
1139                 cell_defer_no_holder(tc, cell);
1140                 bio_endio(bio, 0);
1141                 break;
1142
1143         default:
1144                 DMERR_LIMIT("%s: dm_thin_find_block() failed: error = %d",
1145                             __func__, r);
1146                 cell_defer_no_holder(tc, cell);
1147                 bio_io_error(bio);
1148                 break;
1149         }
1150 }
1151
1152 static void break_sharing(struct thin_c *tc, struct bio *bio, dm_block_t block,
1153                           struct dm_cell_key *key,
1154                           struct dm_thin_lookup_result *lookup_result,
1155                           struct dm_bio_prison_cell *cell)
1156 {
1157         int r;
1158         dm_block_t data_block;
1159         struct pool *pool = tc->pool;
1160
1161         r = alloc_data_block(tc, &data_block);
1162         switch (r) {
1163         case 0:
1164                 schedule_internal_copy(tc, block, lookup_result->block,
1165                                        data_block, cell, bio);
1166                 break;
1167
1168         case -ENOSPC:
1169                 retry_bios_on_resume(pool, cell);
1170                 break;
1171
1172         default:
1173                 DMERR_LIMIT("%s: alloc_data_block() failed: error = %d",
1174                             __func__, r);
1175                 cell_error(pool, cell);
1176                 break;
1177         }
1178 }
1179
1180 static void process_shared_bio(struct thin_c *tc, struct bio *bio,
1181                                dm_block_t block,
1182                                struct dm_thin_lookup_result *lookup_result)
1183 {
1184         struct dm_bio_prison_cell *cell;
1185         struct pool *pool = tc->pool;
1186         struct dm_cell_key key;
1187
1188         /*
1189          * If cell is already occupied, then sharing is already in the process
1190          * of being broken so we have nothing further to do here.
1191          */
1192         build_data_key(tc->td, lookup_result->block, &key);
1193         if (bio_detain(pool, &key, bio, &cell))
1194                 return;
1195
1196         if (bio_data_dir(bio) == WRITE && bio->bi_iter.bi_size)
1197                 break_sharing(tc, bio, block, &key, lookup_result, cell);
1198         else {
1199                 struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
1200
1201                 h->shared_read_entry = dm_deferred_entry_inc(pool->shared_read_ds);
1202                 inc_all_io_entry(pool, bio);
1203                 cell_defer_no_holder(tc, cell);
1204
1205                 remap_and_issue(tc, bio, lookup_result->block);
1206         }
1207 }
1208
1209 static void provision_block(struct thin_c *tc, struct bio *bio, dm_block_t block,
1210                             struct dm_bio_prison_cell *cell)
1211 {
1212         int r;
1213         dm_block_t data_block;
1214         struct pool *pool = tc->pool;
1215
1216         /*
1217          * Remap empty bios (flushes) immediately, without provisioning.
1218          */
1219         if (!bio->bi_iter.bi_size) {
1220                 inc_all_io_entry(pool, bio);
1221                 cell_defer_no_holder(tc, cell);
1222
1223                 remap_and_issue(tc, bio, 0);
1224                 return;
1225         }
1226
1227         /*
1228          * Fill read bios with zeroes and complete them immediately.
1229          */
1230         if (bio_data_dir(bio) == READ) {
1231                 zero_fill_bio(bio);
1232                 cell_defer_no_holder(tc, cell);
1233                 bio_endio(bio, 0);
1234                 return;
1235         }
1236
1237         r = alloc_data_block(tc, &data_block);
1238         switch (r) {
1239         case 0:
1240                 if (tc->origin_dev)
1241                         schedule_external_copy(tc, block, data_block, cell, bio);
1242                 else
1243                         schedule_zero(tc, block, data_block, cell, bio);
1244                 break;
1245
1246         case -ENOSPC:
1247                 retry_bios_on_resume(pool, cell);
1248                 break;
1249
1250         default:
1251                 DMERR_LIMIT("%s: alloc_data_block() failed: error = %d",
1252                             __func__, r);
1253                 cell_error(pool, cell);
1254                 break;
1255         }
1256 }
1257
1258 static void process_bio(struct thin_c *tc, struct bio *bio)
1259 {
1260         int r;
1261         struct pool *pool = tc->pool;
1262         dm_block_t block = get_bio_block(tc, bio);
1263         struct dm_bio_prison_cell *cell;
1264         struct dm_cell_key key;
1265         struct dm_thin_lookup_result lookup_result;
1266
1267         /*
1268          * If cell is already occupied, then the block is already
1269          * being provisioned so we have nothing further to do here.
1270          */
1271         build_virtual_key(tc->td, block, &key);
1272         if (bio_detain(pool, &key, bio, &cell))
1273                 return;
1274
1275         r = dm_thin_find_block(tc->td, block, 1, &lookup_result);
1276         switch (r) {
1277         case 0:
1278                 if (lookup_result.shared) {
1279                         process_shared_bio(tc, bio, block, &lookup_result);
1280                         cell_defer_no_holder(tc, cell); /* FIXME: pass this cell into process_shared? */
1281                 } else {
1282                         inc_all_io_entry(pool, bio);
1283                         cell_defer_no_holder(tc, cell);
1284
1285                         remap_and_issue(tc, bio, lookup_result.block);
1286                 }
1287                 break;
1288
1289         case -ENODATA:
1290                 if (bio_data_dir(bio) == READ && tc->origin_dev) {
1291                         inc_all_io_entry(pool, bio);
1292                         cell_defer_no_holder(tc, cell);
1293
1294                         remap_to_origin_and_issue(tc, bio);
1295                 } else
1296                         provision_block(tc, bio, block, cell);
1297                 break;
1298
1299         default:
1300                 DMERR_LIMIT("%s: dm_thin_find_block() failed: error = %d",
1301                             __func__, r);
1302                 cell_defer_no_holder(tc, cell);
1303                 bio_io_error(bio);
1304                 break;
1305         }
1306 }
1307
1308 static void process_bio_read_only(struct thin_c *tc, struct bio *bio)
1309 {
1310         int r;
1311         int rw = bio_data_dir(bio);
1312         dm_block_t block = get_bio_block(tc, bio);
1313         struct dm_thin_lookup_result lookup_result;
1314
1315         r = dm_thin_find_block(tc->td, block, 1, &lookup_result);
1316         switch (r) {
1317         case 0:
1318                 if (lookup_result.shared && (rw == WRITE) && bio->bi_iter.bi_size)
1319                         handle_unserviceable_bio(tc->pool, bio);
1320                 else {
1321                         inc_all_io_entry(tc->pool, bio);
1322                         remap_and_issue(tc, bio, lookup_result.block);
1323                 }
1324                 break;
1325
1326         case -ENODATA:
1327                 if (rw != READ) {
1328                         handle_unserviceable_bio(tc->pool, bio);
1329                         break;
1330                 }
1331
1332                 if (tc->origin_dev) {
1333                         inc_all_io_entry(tc->pool, bio);
1334                         remap_to_origin_and_issue(tc, bio);
1335                         break;
1336                 }
1337
1338                 zero_fill_bio(bio);
1339                 bio_endio(bio, 0);
1340                 break;
1341
1342         default:
1343                 DMERR_LIMIT("%s: dm_thin_find_block() failed: error = %d",
1344                             __func__, r);
1345                 bio_io_error(bio);
1346                 break;
1347         }
1348 }
1349
1350 static void process_bio_success(struct thin_c *tc, struct bio *bio)
1351 {
1352         bio_endio(bio, 0);
1353 }
1354
1355 static void process_bio_fail(struct thin_c *tc, struct bio *bio)
1356 {
1357         bio_io_error(bio);
1358 }
1359
1360 /*
1361  * FIXME: should we also commit due to size of transaction, measured in
1362  * metadata blocks?
1363  */
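/*
 * The first comparison below handles jiffies wrapping past
 * last_commit_jiffies.
 */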
1364 static int need_commit_due_to_time(struct pool *pool)
1365 {
1366         return jiffies < pool->last_commit_jiffies ||
1367                jiffies > pool->last_commit_jiffies + COMMIT_PERIOD;
1368 }
1369
1370 static void process_deferred_bios(struct pool *pool)
1371 {
1372         unsigned long flags;
1373         struct bio *bio;
1374         struct bio_list bios;
1375
1376         bio_list_init(&bios);
1377
1378         spin_lock_irqsave(&pool->lock, flags);
1379         bio_list_merge(&bios, &pool->deferred_bios);
1380         bio_list_init(&pool->deferred_bios);
1381         spin_unlock_irqrestore(&pool->lock, flags);
1382
1383         while ((bio = bio_list_pop(&bios))) {
1384                 struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
1385                 struct thin_c *tc = h->tc;
1386
1387                 if (tc->requeue_mode) {
1388                         bio_endio(bio, DM_ENDIO_REQUEUE);
1389                         continue;
1390                 }
1391
1392                 /*
1393                  * If we've got no free new_mapping structs, and processing
1394                  * this bio might require one, we pause until there are some
1395                  * prepared mappings to process.
1396                  */
1397                 if (ensure_next_mapping(pool)) {
1398                         spin_lock_irqsave(&pool->lock, flags);
1399                         bio_list_add(&pool->deferred_bios, bio);
1400                         bio_list_merge(&pool->deferred_bios, &bios);
1401                         spin_unlock_irqrestore(&pool->lock, flags);
1402                         break;
1403                 }
1404
1405                 if (bio->bi_rw & REQ_DISCARD)
1406                         pool->process_discard(tc, bio);
1407                 else
1408                         pool->process_bio(tc, bio);
1409         }
1410
1411         /*
1412          * If there are any deferred flush bios, we must commit
1413          * the metadata before issuing them.
1414          */
1415         bio_list_init(&bios);
1416         spin_lock_irqsave(&pool->lock, flags);
1417         bio_list_merge(&bios, &pool->deferred_flush_bios);
1418         bio_list_init(&pool->deferred_flush_bios);
1419         spin_unlock_irqrestore(&pool->lock, flags);
1420
1421         if (bio_list_empty(&bios) &&
1422             !(dm_pool_changed_this_transaction(pool->pmd) && need_commit_due_to_time(pool)))
1423                 return;
1424
1425         if (commit(pool)) {
1426                 while ((bio = bio_list_pop(&bios)))
1427                         bio_io_error(bio);
1428                 return;
1429         }
1430         pool->last_commit_jiffies = jiffies;
1431
1432         while ((bio = bio_list_pop(&bios)))
1433                 generic_make_request(bio);
1434 }
1435
1436 static void do_worker(struct work_struct *ws)
1437 {
1438         struct pool *pool = container_of(ws, struct pool, worker);
1439
1440         process_prepared(pool, &pool->prepared_mappings, &pool->process_prepared_mapping);
1441         process_prepared(pool, &pool->prepared_discards, &pool->process_prepared_discard);
1442         process_deferred_bios(pool);
1443 }
1444
1445 /*
1446  * We want to commit periodically so that not too much
1447  * unwritten data builds up.
1448  */
1449 static void do_waker(struct work_struct *ws)
1450 {
1451         struct pool *pool = container_of(to_delayed_work(ws), struct pool, waker);
1452         wake_worker(pool);
1453         queue_delayed_work(pool->wq, &pool->waker, COMMIT_PERIOD);
1454 }
1455
1456 /*
1457  * We're holding onto IO to allow userland time to react.  After the
1458  * timeout either the pool will have been resized (and thus back in
1459  * PM_WRITE mode), or we degrade to PM_READ_ONLY and start erroring IO.
1460  */
1461 static void do_no_space_timeout(struct work_struct *ws)
1462 {
1463         struct pool *pool = container_of(to_delayed_work(ws), struct pool,
1464                                          no_space_timeout);
1465
1466         if (get_pool_mode(pool) == PM_OUT_OF_DATA_SPACE && !pool->pf.error_if_no_space)
1467                 set_pool_mode(pool, PM_READ_ONLY);
1468 }
1469
1470 /*----------------------------------------------------------------*/
1471
1472 struct noflush_work {
1473         struct work_struct worker;
1474         struct thin_c *tc;
1475
1476         atomic_t complete;
1477         wait_queue_head_t wait;
1478 };
1479
1480 static void complete_noflush_work(struct noflush_work *w)
1481 {
1482         atomic_set(&w->complete, 1);
1483         wake_up(&w->wait);
1484 }
1485
1486 static void do_noflush_start(struct work_struct *ws)
1487 {
1488         struct noflush_work *w = container_of(ws, struct noflush_work, worker);
1489         w->tc->requeue_mode = true;
1490         requeue_io(w->tc);
1491         complete_noflush_work(w);
1492 }
1493
1494 static void do_noflush_stop(struct work_struct *ws)
1495 {
1496         struct noflush_work *w = container_of(ws, struct noflush_work, worker);
1497         w->tc->requeue_mode = false;
1498         complete_noflush_work(w);
1499 }
1500
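/*
 * Run fn on the pool's workqueue and wait for it to signal completion via
 * the on-stack noflush_work above.
 */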
1501 static void noflush_work(struct thin_c *tc, void (*fn)(struct work_struct *))
1502 {
1503         struct noflush_work w;
1504
1505         INIT_WORK(&w.worker, fn);
1506         w.tc = tc;
1507         atomic_set(&w.complete, 0);
1508         init_waitqueue_head(&w.wait);
1509
1510         queue_work(tc->pool->wq, &w.worker);
1511
1512         wait_event(w.wait, atomic_read(&w.complete));
1513 }
1514
1515 /*----------------------------------------------------------------*/
1516
1517 static enum pool_mode get_pool_mode(struct pool *pool)
1518 {
1519         return pool->pf.mode;
1520 }
1521
1522 static void notify_of_pool_mode_change(struct pool *pool, const char *new_mode)
1523 {
1524         dm_table_event(pool->ti->table);
1525         DMINFO("%s: switching pool to %s mode",
1526                dm_device_name(pool->pool_md), new_mode);
1527 }
1528
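/*
 * Switch the pool's bio/mapping processing functions to match new_mode,
 * taking care never to leave PM_FAIL and never to re-enter PM_WRITE while
 * the metadata still needs checking.
 */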
1529 static void set_pool_mode(struct pool *pool, enum pool_mode new_mode)
1530 {
1531         struct pool_c *pt = pool->ti->private;
1532         bool needs_check = dm_pool_metadata_needs_check(pool->pmd);
1533         enum pool_mode old_mode = get_pool_mode(pool);
1534         unsigned long no_space_timeout = ACCESS_ONCE(no_space_timeout_secs) * HZ;
1535
1536         /*
1537          * Never allow the pool to transition to PM_WRITE mode if user
1538          * intervention is required to verify metadata and data consistency.
1539          */
1540         if (new_mode == PM_WRITE && needs_check) {
1541                 DMERR("%s: unable to switch pool to write mode until repaired.",
1542                       dm_device_name(pool->pool_md));
1543                 if (old_mode != new_mode)
1544                         new_mode = old_mode;
1545                 else
1546                         new_mode = PM_READ_ONLY;
1547         }
1548         /*
1549          * If we were in PM_FAIL mode, rollback of metadata failed.  We're
1550          * not going to recover without a thin_repair.  So we never let the
1551          * pool move out of the old mode.
1552          */
1553         if (old_mode == PM_FAIL)
1554                 new_mode = old_mode;
1555
1556         switch (new_mode) {
1557         case PM_FAIL:
1558                 if (old_mode != new_mode)
1559                         notify_of_pool_mode_change(pool, "failure");
1560                 dm_pool_metadata_read_only(pool->pmd);
1561                 pool->process_bio = process_bio_fail;
1562                 pool->process_discard = process_bio_fail;
1563                 pool->process_prepared_mapping = process_prepared_mapping_fail;
1564                 pool->process_prepared_discard = process_prepared_discard_fail;
1565
1566                 error_retry_list(pool);
1567                 break;
1568
1569         case PM_READ_ONLY:
1570                 if (old_mode != new_mode)
1571                         notify_of_pool_mode_change(pool, "read-only");
1572                 dm_pool_metadata_read_only(pool->pmd);
1573                 pool->process_bio = process_bio_read_only;
1574                 pool->process_discard = process_bio_success;
1575                 pool->process_prepared_mapping = process_prepared_mapping_fail;
1576                 pool->process_prepared_discard = process_prepared_discard_passdown;
1577
1578                 error_retry_list(pool);
1579                 break;
1580
1581         case PM_OUT_OF_DATA_SPACE:
1582                 /*
1583                  * Ideally we'd never hit this state; the low water mark
1584                  * would trigger userland to extend the pool before we
1585                  * completely run out of data space.  However, many small
1586                  * IOs to unprovisioned space can consume data space at an
1587                  * alarming rate.  Adjust your low water mark if you're
1588                  * frequently seeing this mode.
1589                  */
1590                 if (old_mode != new_mode)
1591                         notify_of_pool_mode_change(pool, "out-of-data-space");
1592                 pool->process_bio = process_bio_read_only;
1593                 pool->process_discard = process_discard;
1594                 pool->process_prepared_mapping = process_prepared_mapping;
1595                 pool->process_prepared_discard = process_prepared_discard_passdown;
1596
1597                 if (!pool->pf.error_if_no_space && no_space_timeout)
1598                         queue_delayed_work(pool->wq, &pool->no_space_timeout, no_space_timeout);
1599                 break;
1600
1601         case PM_WRITE:
1602                 if (old_mode != new_mode)
1603                         notify_of_pool_mode_change(pool, "write");
1604                 dm_pool_metadata_read_write(pool->pmd);
1605                 pool->process_bio = process_bio;
1606                 pool->process_discard = process_discard;
1607                 pool->process_prepared_mapping = process_prepared_mapping;
1608                 pool->process_prepared_discard = process_prepared_discard;
1609                 break;
1610         }
1611
1612         pool->pf.mode = new_mode;
1613         /*
1614          * The pool mode may have changed, sync it so bind_control_target()
1615          * doesn't cause an unexpected mode transition on resume.
1616          */
1617         pt->adjusted_pf.mode = new_mode;
1618 }
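
/*
 * Illustrative transitions implied by the checks above: a table reload
 * requesting PM_WRITE while the metadata 'needs_check' flag is set is
 * downgraded (the pool keeps its old mode, or falls back to
 * PM_READ_ONLY), and a pool that has entered PM_FAIL stays there until
 * it has been repaired with thin_repair and reloaded.
 */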
1619
1620 static void abort_transaction(struct pool *pool)
1621 {
1622         const char *dev_name = dm_device_name(pool->pool_md);
1623
1624         DMERR_LIMIT("%s: aborting current metadata transaction", dev_name);
1625         if (dm_pool_abort_metadata(pool->pmd)) {
1626                 DMERR("%s: failed to abort metadata transaction", dev_name);
1627                 set_pool_mode(pool, PM_FAIL);
1628         }
1629
1630         if (dm_pool_metadata_set_needs_check(pool->pmd)) {
1631                 DMERR("%s: failed to set 'needs_check' flag in metadata", dev_name);
1632                 set_pool_mode(pool, PM_FAIL);
1633         }
1634 }
1635
1636 static void metadata_operation_failed(struct pool *pool, const char *op, int r)
1637 {
1638         DMERR_LIMIT("%s: metadata operation '%s' failed: error = %d",
1639                     dm_device_name(pool->pool_md), op, r);
1640
1641         abort_transaction(pool);
1642         set_pool_mode(pool, PM_READ_ONLY);
1643 }
1644
1645 /*----------------------------------------------------------------*/
1646
1647 /*
1648  * Mapping functions.
1649  */
1650
1651 /*
1652  * Called only while mapping a thin bio to hand it over to the workqueue.
1653  */
1654 static void thin_defer_bio(struct thin_c *tc, struct bio *bio)
1655 {
1656         unsigned long flags;
1657         struct pool *pool = tc->pool;
1658
1659         spin_lock_irqsave(&pool->lock, flags);
1660         bio_list_add(&pool->deferred_bios, bio);
1661         spin_unlock_irqrestore(&pool->lock, flags);
1662
1663         wake_worker(pool);
1664 }
1665
1666 static void thin_hook_bio(struct thin_c *tc, struct bio *bio)
1667 {
1668         struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
1669
1670         h->tc = tc;
1671         h->shared_read_entry = NULL;
1672         h->all_io_entry = NULL;
1673         h->overwrite_mapping = NULL;
1674 }
1675
1676 /*
1677  * Non-blocking function called from the thin target's map function.
1678  */
1679 static int thin_bio_map(struct dm_target *ti, struct bio *bio)
1680 {
1681         int r;
1682         struct thin_c *tc = ti->private;
1683         dm_block_t block = get_bio_block(tc, bio);
1684         struct dm_thin_device *td = tc->td;
1685         struct dm_thin_lookup_result result;
1686         struct dm_bio_prison_cell cell1, cell2;
1687         struct dm_bio_prison_cell *cell_result;
1688         struct dm_cell_key key;
1689
1690         thin_hook_bio(tc, bio);
1691
1692         if (tc->requeue_mode) {
1693                 bio_endio(bio, DM_ENDIO_REQUEUE);
1694                 return DM_MAPIO_SUBMITTED;
1695         }
1696
1697         if (get_pool_mode(tc->pool) == PM_FAIL) {
1698                 bio_io_error(bio);
1699                 return DM_MAPIO_SUBMITTED;
1700         }
1701
1702         if (bio->bi_rw & (REQ_DISCARD | REQ_FLUSH | REQ_FUA)) {
1703                 thin_defer_bio(tc, bio);
1704                 return DM_MAPIO_SUBMITTED;
1705         }
1706
1707         r = dm_thin_find_block(td, block, 0, &result);
1708
1709         /*
1710          * Note that we defer readahead too.
1711          */
1712         switch (r) {
1713         case 0:
1714                 if (unlikely(result.shared)) {
1715                         /*
1716                          * We have a race condition here between the
1717                          * result.shared value returned by the lookup and
1718                          * snapshot creation, which may cause new
1719                          * sharing.
1720                          *
1721                          * To avoid this always quiesce the origin before
1722                          * taking the snap.  You want to do this anyway to
1723                          * ensure a consistent application view
1724                          * (i.e. lockfs).
1725                          *
1726                          * More distant ancestors are irrelevant. The
1727                          * shared flag will be set in their case.
1728                          */
1729                         thin_defer_bio(tc, bio);
1730                         return DM_MAPIO_SUBMITTED;
1731                 }
1732
1733                 build_virtual_key(tc->td, block, &key);
1734                 if (dm_bio_detain(tc->pool->prison, &key, bio, &cell1, &cell_result))
1735                         return DM_MAPIO_SUBMITTED;
1736
1737                 build_data_key(tc->td, result.block, &key);
1738                 if (dm_bio_detain(tc->pool->prison, &key, bio, &cell2, &cell_result)) {
1739                         cell_defer_no_holder_no_free(tc, &cell1);
1740                         return DM_MAPIO_SUBMITTED;
1741                 }
1742
1743                 inc_all_io_entry(tc->pool, bio);
1744                 cell_defer_no_holder_no_free(tc, &cell2);
1745                 cell_defer_no_holder_no_free(tc, &cell1);
1746
1747                 remap(tc, bio, result.block);
1748                 return DM_MAPIO_REMAPPED;
1749
1750         case -ENODATA:
1751                 if (get_pool_mode(tc->pool) == PM_READ_ONLY) {
1752                         /*
1753                          * This block isn't provisioned, and we have no way
1754                          * of doing so.
1755                          */
1756                         handle_unserviceable_bio(tc->pool, bio);
1757                         return DM_MAPIO_SUBMITTED;
1758                 }
1759                 /* fall through */
1760
1761         case -EWOULDBLOCK:
1762                 /*
1763                  * In future, the failed dm_thin_find_block above could
1764                  * provide the hint to load the metadata into cache.
1765                  */
1766                 thin_defer_bio(tc, bio);
1767                 return DM_MAPIO_SUBMITTED;
1768
1769         default:
1770                 /*
1771                  * Must always call bio_io_error on failure.
1772                  * dm_thin_find_block can fail with -EINVAL if the
1773                  * pool is switched to fail-io mode.
1774                  */
1775                 bio_io_error(bio);
1776                 return DM_MAPIO_SUBMITTED;
1777         }
1778 }
1779
1780 static int pool_is_congested(struct dm_target_callbacks *cb, int bdi_bits)
1781 {
1782         int r;
1783         unsigned long flags;
1784         struct pool_c *pt = container_of(cb, struct pool_c, callbacks);
1785
1786         spin_lock_irqsave(&pt->pool->lock, flags);
1787         r = !bio_list_empty(&pt->pool->retry_on_resume_list);
1788         spin_unlock_irqrestore(&pt->pool->lock, flags);
1789
1790         if (!r) {
1791                 struct request_queue *q = bdev_get_queue(pt->data_dev->bdev);
1792                 r = bdi_congested(&q->backing_dev_info, bdi_bits);
1793         }
1794
1795         return r;
1796 }
1797
1798 static void __requeue_bios(struct pool *pool)
1799 {
1800         bio_list_merge(&pool->deferred_bios, &pool->retry_on_resume_list);
1801         bio_list_init(&pool->retry_on_resume_list);
1802 }
1803
1804 /*----------------------------------------------------------------
1805  * Binding of control targets to a pool object
1806  *--------------------------------------------------------------*/
1807 static bool data_dev_supports_discard(struct pool_c *pt)
1808 {
1809         struct request_queue *q = bdev_get_queue(pt->data_dev->bdev);
1810
1811         return q && blk_queue_discard(q);
1812 }
1813
1814 static bool is_factor(sector_t block_size, uint32_t n)
1815 {
1816         return !sector_div(block_size, n);
1817 }
1818
1819 /*
1820  * If discard_passdown was enabled verify that the data device
1821  * supports discards.  Disable discard_passdown if not.
1822  */
1823 static void disable_passdown_if_not_supported(struct pool_c *pt)
1824 {
1825         struct pool *pool = pt->pool;
1826         struct block_device *data_bdev = pt->data_dev->bdev;
1827         struct queue_limits *data_limits = &bdev_get_queue(data_bdev)->limits;
1828         sector_t block_size = pool->sectors_per_block << SECTOR_SHIFT;
1829         const char *reason = NULL;
1830         char buf[BDEVNAME_SIZE];
1831
1832         if (!pt->adjusted_pf.discard_passdown)
1833                 return;
1834
1835         if (!data_dev_supports_discard(pt))
1836                 reason = "discard unsupported";
1837
1838         else if (data_limits->max_discard_sectors < pool->sectors_per_block)
1839                 reason = "max discard sectors smaller than a block";
1840
1841         else if (data_limits->discard_granularity > block_size)
1842                 reason = "discard granularity larger than a block";
1843
1844         else if (!is_factor(block_size, data_limits->discard_granularity))
1845                 reason = "discard granularity not a factor of block size";
1846
1847         if (reason) {
1848                 DMWARN("Data device (%s) %s: Disabling discard passdown.", bdevname(data_bdev, buf), reason);
1849                 pt->adjusted_pf.discard_passdown = false;
1850         }
1851 }
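
/*
 * Example of the checks above (illustrative numbers): with 64KiB pool
 * blocks, a data device advertising a discard_granularity of 256KiB
 * fails the "discard granularity larger than a block" test, and one
 * whose max_discard_sectors is below 128 fails the "max discard sectors
 * smaller than a block" test; either way discard_passdown is cleared.
 */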
1852
1853 static int bind_control_target(struct pool *pool, struct dm_target *ti)
1854 {
1855         struct pool_c *pt = ti->private;
1856
1857         /*
1858          * We want to make sure that a pool in PM_FAIL mode is never upgraded.
1859          */
1860         enum pool_mode old_mode = get_pool_mode(pool);
1861         enum pool_mode new_mode = pt->adjusted_pf.mode;
1862
1863         /*
1864          * Don't change the pool's mode until set_pool_mode() below.
1865          * Otherwise the pool's process_* function pointers may
1866          * not match the desired pool mode.
1867          */
1868         pt->adjusted_pf.mode = old_mode;
1869
1870         pool->ti = ti;
1871         pool->pf = pt->adjusted_pf;
1872         pool->low_water_blocks = pt->low_water_blocks;
1873
1874         set_pool_mode(pool, new_mode);
1875
1876         return 0;
1877 }
1878
1879 static void unbind_control_target(struct pool *pool, struct dm_target *ti)
1880 {
1881         if (pool->ti == ti)
1882                 pool->ti = NULL;
1883 }
1884
1885 /*----------------------------------------------------------------
1886  * Pool creation
1887  *--------------------------------------------------------------*/
1888 /* Initialize pool features. */
1889 static void pool_features_init(struct pool_features *pf)
1890 {
1891         pf->mode = PM_WRITE;
1892         pf->zero_new_blocks = true;
1893         pf->discard_enabled = true;
1894         pf->discard_passdown = true;
1895         pf->error_if_no_space = false;
1896 }
1897
1898 static void __pool_destroy(struct pool *pool)
1899 {
1900         __pool_table_remove(pool);
1901
1902         if (dm_pool_metadata_close(pool->pmd) < 0)
1903                 DMWARN("%s: dm_pool_metadata_close() failed.", __func__);
1904
1905         dm_bio_prison_destroy(pool->prison);
1906         dm_kcopyd_client_destroy(pool->copier);
1907
1908         if (pool->wq)
1909                 destroy_workqueue(pool->wq);
1910
1911         if (pool->next_mapping)
1912                 mempool_free(pool->next_mapping, pool->mapping_pool);
1913         mempool_destroy(pool->mapping_pool);
1914         dm_deferred_set_destroy(pool->shared_read_ds);
1915         dm_deferred_set_destroy(pool->all_io_ds);
1916         kfree(pool);
1917 }
1918
1919 static struct kmem_cache *_new_mapping_cache;
1920
1921 static struct pool *pool_create(struct mapped_device *pool_md,
1922                                 struct block_device *metadata_dev,
1923                                 unsigned long block_size,
1924                                 int read_only, char **error)
1925 {
1926         int r;
1927         void *err_p;
1928         struct pool *pool;
1929         struct dm_pool_metadata *pmd;
1930         bool format_device = read_only ? false : true;
1931
1932         pmd = dm_pool_metadata_open(metadata_dev, block_size, format_device);
1933         if (IS_ERR(pmd)) {
1934                 *error = "Error creating metadata object";
1935                 return (struct pool *)pmd;
1936         }
1937
1938         pool = kmalloc(sizeof(*pool), GFP_KERNEL);
1939         if (!pool) {
1940                 *error = "Error allocating memory for pool";
1941                 err_p = ERR_PTR(-ENOMEM);
1942                 goto bad_pool;
1943         }
1944
1945         pool->pmd = pmd;
1946         pool->sectors_per_block = block_size;
1947         if (block_size & (block_size - 1))
1948                 pool->sectors_per_block_shift = -1;
1949         else
1950                 pool->sectors_per_block_shift = __ffs(block_size);
1951         pool->low_water_blocks = 0;
1952         pool_features_init(&pool->pf);
1953         pool->prison = dm_bio_prison_create(PRISON_CELLS);
1954         if (!pool->prison) {
1955                 *error = "Error creating pool's bio prison";
1956                 err_p = ERR_PTR(-ENOMEM);
1957                 goto bad_prison;
1958         }
1959
1960         pool->copier = dm_kcopyd_client_create(&dm_kcopyd_throttle);
1961         if (IS_ERR(pool->copier)) {
1962                 r = PTR_ERR(pool->copier);
1963                 *error = "Error creating pool's kcopyd client";
1964                 err_p = ERR_PTR(r);
1965                 goto bad_kcopyd_client;
1966         }
1967
1968         /*
1969          * Create singlethreaded workqueue that will service all devices
1970          * that use this metadata.
1971          */
1972         pool->wq = alloc_ordered_workqueue("dm-" DM_MSG_PREFIX, WQ_MEM_RECLAIM);
1973         if (!pool->wq) {
1974                 *error = "Error creating pool's workqueue";
1975                 err_p = ERR_PTR(-ENOMEM);
1976                 goto bad_wq;
1977         }
1978
1979         INIT_WORK(&pool->worker, do_worker);
1980         INIT_DELAYED_WORK(&pool->waker, do_waker);
1981         INIT_DELAYED_WORK(&pool->no_space_timeout, do_no_space_timeout);
1982         spin_lock_init(&pool->lock);
1983         bio_list_init(&pool->deferred_bios);
1984         bio_list_init(&pool->deferred_flush_bios);
1985         INIT_LIST_HEAD(&pool->prepared_mappings);
1986         INIT_LIST_HEAD(&pool->prepared_discards);
1987         pool->low_water_triggered = false;
1988         bio_list_init(&pool->retry_on_resume_list);
1989
1990         pool->shared_read_ds = dm_deferred_set_create();
1991         if (!pool->shared_read_ds) {
1992                 *error = "Error creating pool's shared read deferred set";
1993                 err_p = ERR_PTR(-ENOMEM);
1994                 goto bad_shared_read_ds;
1995         }
1996
1997         pool->all_io_ds = dm_deferred_set_create();
1998         if (!pool->all_io_ds) {
1999                 *error = "Error creating pool's all io deferred set";
2000                 err_p = ERR_PTR(-ENOMEM);
2001                 goto bad_all_io_ds;
2002         }
2003
2004         pool->next_mapping = NULL;
2005         pool->mapping_pool = mempool_create_slab_pool(MAPPING_POOL_SIZE,
2006                                                       _new_mapping_cache);
2007         if (!pool->mapping_pool) {
2008                 *error = "Error creating pool's mapping mempool";
2009                 err_p = ERR_PTR(-ENOMEM);
2010                 goto bad_mapping_pool;
2011         }
2012
2013         pool->ref_count = 1;
2014         pool->last_commit_jiffies = jiffies;
2015         pool->pool_md = pool_md;
2016         pool->md_dev = metadata_dev;
2017         __pool_table_insert(pool);
2018
2019         return pool;
2020
2021 bad_mapping_pool:
2022         dm_deferred_set_destroy(pool->all_io_ds);
2023 bad_all_io_ds:
2024         dm_deferred_set_destroy(pool->shared_read_ds);
2025 bad_shared_read_ds:
2026         destroy_workqueue(pool->wq);
2027 bad_wq:
2028         dm_kcopyd_client_destroy(pool->copier);
2029 bad_kcopyd_client:
2030         dm_bio_prison_destroy(pool->prison);
2031 bad_prison:
2032         kfree(pool);
2033 bad_pool:
2034         if (dm_pool_metadata_close(pmd))
2035                 DMWARN("%s: dm_pool_metadata_close() failed.", __func__);
2036
2037         return err_p;
2038 }
2039
2040 static void __pool_inc(struct pool *pool)
2041 {
2042         BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));
2043         pool->ref_count++;
2044 }
2045
2046 static void __pool_dec(struct pool *pool)
2047 {
2048         BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));
2049         BUG_ON(!pool->ref_count);
2050         if (!--pool->ref_count)
2051                 __pool_destroy(pool);
2052 }
2053
2054 static struct pool *__pool_find(struct mapped_device *pool_md,
2055                                 struct block_device *metadata_dev,
2056                                 unsigned long block_size, int read_only,
2057                                 char **error, int *created)
2058 {
2059         struct pool *pool = __pool_table_lookup_metadata_dev(metadata_dev);
2060
2061         if (pool) {
2062                 if (pool->pool_md != pool_md) {
2063                         *error = "metadata device already in use by a pool";
2064                         return ERR_PTR(-EBUSY);
2065                 }
2066                 __pool_inc(pool);
2067
2068         } else {
2069                 pool = __pool_table_lookup(pool_md);
2070                 if (pool) {
2071                         if (pool->md_dev != metadata_dev) {
2072                                 *error = "different pool cannot replace a pool";
2073                                 return ERR_PTR(-EINVAL);
2074                         }
2075                         __pool_inc(pool);
2076
2077                 } else {
2078                         pool = pool_create(pool_md, metadata_dev, block_size, read_only, error);
2079                         *created = 1;
2080                 }
2081         }
2082
2083         return pool;
2084 }
2085
2086 /*----------------------------------------------------------------
2087  * Pool target methods
2088  *--------------------------------------------------------------*/
2089 static void pool_dtr(struct dm_target *ti)
2090 {
2091         struct pool_c *pt = ti->private;
2092
2093         mutex_lock(&dm_thin_pool_table.mutex);
2094
2095         unbind_control_target(pt->pool, ti);
2096         __pool_dec(pt->pool);
2097         dm_put_device(ti, pt->metadata_dev);
2098         dm_put_device(ti, pt->data_dev);
2099         kfree(pt);
2100
2101         mutex_unlock(&dm_thin_pool_table.mutex);
2102 }
2103
2104 static int parse_pool_features(struct dm_arg_set *as, struct pool_features *pf,
2105                                struct dm_target *ti)
2106 {
2107         int r;
2108         unsigned argc;
2109         const char *arg_name;
2110
2111         static struct dm_arg _args[] = {
2112                 {0, 4, "Invalid number of pool feature arguments"},
2113         };
2114
2115         /*
2116          * No feature arguments supplied.
2117          */
2118         if (!as->argc)
2119                 return 0;
2120
2121         r = dm_read_arg_group(_args, as, &argc, &ti->error);
2122         if (r)
2123                 return -EINVAL;
2124
2125         while (argc && !r) {
2126                 arg_name = dm_shift_arg(as);
2127                 argc--;
2128
2129                 if (!strcasecmp(arg_name, "skip_block_zeroing"))
2130                         pf->zero_new_blocks = false;
2131
2132                 else if (!strcasecmp(arg_name, "ignore_discard"))
2133                         pf->discard_enabled = false;
2134
2135                 else if (!strcasecmp(arg_name, "no_discard_passdown"))
2136                         pf->discard_passdown = false;
2137
2138                 else if (!strcasecmp(arg_name, "read_only"))
2139                         pf->mode = PM_READ_ONLY;
2140
2141                 else if (!strcasecmp(arg_name, "error_if_no_space"))
2142                         pf->error_if_no_space = true;
2143
2144                 else {
2145                         ti->error = "Unrecognised pool feature requested";
2146                         r = -EINVAL;
2147                         break;
2148                 }
2149         }
2150
2151         return r;
2152 }
2153
2154 static void metadata_low_callback(void *context)
2155 {
2156         struct pool *pool = context;
2157
2158         DMWARN("%s: reached low water mark for metadata device: sending event.",
2159                dm_device_name(pool->pool_md));
2160
2161         dm_table_event(pool->ti->table);
2162 }
2163
2164 static sector_t get_dev_size(struct block_device *bdev)
2165 {
2166         return i_size_read(bdev->bd_inode) >> SECTOR_SHIFT;
2167 }
2168
2169 static void warn_if_metadata_device_too_big(struct block_device *bdev)
2170 {
2171         sector_t metadata_dev_size = get_dev_size(bdev);
2172         char buffer[BDEVNAME_SIZE];
2173
2174         if (metadata_dev_size > THIN_METADATA_MAX_SECTORS_WARNING)
2175                 DMWARN("Metadata device %s is larger than %u sectors: excess space will not be used.",
2176                        bdevname(bdev, buffer), THIN_METADATA_MAX_SECTORS);
2177 }
2178
2179 static sector_t get_metadata_dev_size(struct block_device *bdev)
2180 {
2181         sector_t metadata_dev_size = get_dev_size(bdev);
2182
2183         if (metadata_dev_size > THIN_METADATA_MAX_SECTORS)
2184                 metadata_dev_size = THIN_METADATA_MAX_SECTORS;
2185
2186         return metadata_dev_size;
2187 }
2188
2189 static dm_block_t get_metadata_dev_size_in_blocks(struct block_device *bdev)
2190 {
2191         sector_t metadata_dev_size = get_metadata_dev_size(bdev);
2192
2193         sector_div(metadata_dev_size, THIN_METADATA_BLOCK_SIZE);
2194
2195         return metadata_dev_size;
2196 }
2197
2198 /*
2199  * When a metadata threshold is crossed a dm event is triggered, and
2200  * userland should respond by growing the metadata device.  We could let
2201  * userland set the threshold, like we do with the data threshold, but I'm
2202  * not sure they know enough to do this well.
2203  */
2204 static dm_block_t calc_metadata_threshold(struct pool_c *pt)
2205 {
2206         /*
2207          * 4M is ample for all ops with the possible exception of thin
2208          * device deletion which is harmless if it fails (just retry the
2209          * delete after you've grown the device).
2210          */
2211         dm_block_t quarter = get_metadata_dev_size_in_blocks(pt->metadata_dev->bdev) / 4;
2212         return min((dm_block_t)1024ULL /* 4M */, quarter);
2213 }
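
/*
 * Worked example (illustrative size): a metadata device of 16384 blocks
 * gives quarter = 4096, so the threshold is min(1024, 4096) = 1024
 * blocks - the 4M cap mentioned above.
 */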
2214
2215 /*
2216  * thin-pool <metadata dev> <data dev>
2217  *           <data block size (sectors)>
2218  *           <low water mark (blocks)>
2219  *           [<#feature args> [<arg>]*]
2220  *
2221  * Optional feature arguments are:
2222  *           skip_block_zeroing: skips the zeroing of newly-provisioned blocks.
2223  *           ignore_discard: disable discard
2224  *           no_discard_passdown: don't pass discards down to the data device
2225  *           read_only: Don't allow any changes to be made to the pool metadata.
2226  *           error_if_no_space: error IOs, instead of queueing, if no space.
2227  */
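/*
 * Illustrative table line (device paths and sizes are examples only):
 *
 *   dmsetup create pool --table \
 *     "0 41943040 thin-pool /dev/vg/meta /dev/vg/data 128 16384 1 skip_block_zeroing"
 *
 * i.e. a 20GiB pool with 64KiB (128 sector) data blocks, a low water
 * mark of 16384 blocks and block zeroing skipped.  The data block size
 * must lie between 128 and 2097152 sectors and be a multiple of 128
 * sectors, as enforced in pool_ctr() below.
 */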
2228 static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv)
2229 {
2230         int r, pool_created = 0;
2231         struct pool_c *pt;
2232         struct pool *pool;
2233         struct pool_features pf;
2234         struct dm_arg_set as;
2235         struct dm_dev *data_dev;
2236         unsigned long block_size;
2237         dm_block_t low_water_blocks;
2238         struct dm_dev *metadata_dev;
2239         fmode_t metadata_mode;
2240
2241         /*
2242          * FIXME Remove validation from scope of lock.
2243          */
2244         mutex_lock(&dm_thin_pool_table.mutex);
2245
2246         if (argc < 4) {
2247                 ti->error = "Invalid argument count";
2248                 r = -EINVAL;
2249                 goto out_unlock;
2250         }
2251
2252         as.argc = argc;
2253         as.argv = argv;
2254
2255         /*
2256          * Set default pool features.
2257          */
2258         pool_features_init(&pf);
2259
2260         dm_consume_args(&as, 4);
2261         r = parse_pool_features(&as, &pf, ti);
2262         if (r)
2263                 goto out_unlock;
2264
2265         metadata_mode = FMODE_READ | ((pf.mode == PM_READ_ONLY) ? 0 : FMODE_WRITE);
2266         r = dm_get_device(ti, argv[0], metadata_mode, &metadata_dev);
2267         if (r) {
2268                 ti->error = "Error opening metadata block device";
2269                 goto out_unlock;
2270         }
2271         warn_if_metadata_device_too_big(metadata_dev->bdev);
2272
2273         r = dm_get_device(ti, argv[1], FMODE_READ | FMODE_WRITE, &data_dev);
2274         if (r) {
2275                 ti->error = "Error getting data device";
2276                 goto out_metadata;
2277         }
2278
2279         if (kstrtoul(argv[2], 10, &block_size) || !block_size ||
2280             block_size < DATA_DEV_BLOCK_SIZE_MIN_SECTORS ||
2281             block_size > DATA_DEV_BLOCK_SIZE_MAX_SECTORS ||
2282             block_size & (DATA_DEV_BLOCK_SIZE_MIN_SECTORS - 1)) {
2283                 ti->error = "Invalid block size";
2284                 r = -EINVAL;
2285                 goto out;
2286         }
2287
2288         if (kstrtoull(argv[3], 10, (unsigned long long *)&low_water_blocks)) {
2289                 ti->error = "Invalid low water mark";
2290                 r = -EINVAL;
2291                 goto out;
2292         }
2293
2294         pt = kzalloc(sizeof(*pt), GFP_KERNEL);
2295         if (!pt) {
2296                 r = -ENOMEM;
2297                 goto out;
2298         }
2299
2300         pool = __pool_find(dm_table_get_md(ti->table), metadata_dev->bdev,
2301                            block_size, pf.mode == PM_READ_ONLY, &ti->error, &pool_created);
2302         if (IS_ERR(pool)) {
2303                 r = PTR_ERR(pool);
2304                 goto out_free_pt;
2305         }
2306
2307         /*
2308          * 'pool_created' reflects whether this is the first table load.
2309          * Top level discard support is not allowed to be changed after
2310          * initial load.  This would require a pool reload to trigger thin
2311          * device changes.
2312          */
2313         if (!pool_created && pf.discard_enabled != pool->pf.discard_enabled) {
2314                 ti->error = "Discard support cannot be disabled once enabled";
2315                 r = -EINVAL;
2316                 goto out_flags_changed;
2317         }
2318
2319         pt->pool = pool;
2320         pt->ti = ti;
2321         pt->metadata_dev = metadata_dev;
2322         pt->data_dev = data_dev;
2323         pt->low_water_blocks = low_water_blocks;
2324         pt->adjusted_pf = pt->requested_pf = pf;
2325         ti->num_flush_bios = 1;
2326
2327         /*
2328          * Only need to enable discards if the pool should pass
2329          * them down to the data device.  The thin device's discard
2330          * processing will cause mappings to be removed from the btree.
2331          */
2332         ti->discard_zeroes_data_unsupported = true;
2333         if (pf.discard_enabled && pf.discard_passdown) {
2334                 ti->num_discard_bios = 1;
2335
2336                 /*
2337                  * Setting 'discards_supported' circumvents the normal
2338                  * stacking of discard limits (this keeps the pool and
2339                  * thin devices' discard limits consistent).
2340                  */
2341                 ti->discards_supported = true;
2342         }
2343         ti->private = pt;
2344
2345         r = dm_pool_register_metadata_threshold(pt->pool->pmd,
2346                                                 calc_metadata_threshold(pt),
2347                                                 metadata_low_callback,
2348                                                 pool);
2349         if (r)
2350                 goto out_free_pt;
2351
2352         pt->callbacks.congested_fn = pool_is_congested;
2353         dm_table_add_target_callbacks(ti->table, &pt->callbacks);
2354
2355         mutex_unlock(&dm_thin_pool_table.mutex);
2356
2357         return 0;
2358
2359 out_flags_changed:
2360         __pool_dec(pool);
2361 out_free_pt:
2362         kfree(pt);
2363 out:
2364         dm_put_device(ti, data_dev);
2365 out_metadata:
2366         dm_put_device(ti, metadata_dev);
2367 out_unlock:
2368         mutex_unlock(&dm_thin_pool_table.mutex);
2369
2370         return r;
2371 }
2372
2373 static int pool_map(struct dm_target *ti, struct bio *bio)
2374 {
2375         int r;
2376         struct pool_c *pt = ti->private;
2377         struct pool *pool = pt->pool;
2378         unsigned long flags;
2379
2380         /*
2381          * As this is a singleton target, ti->begin is always zero.
2382          */
2383         spin_lock_irqsave(&pool->lock, flags);
2384         bio->bi_bdev = pt->data_dev->bdev;
2385         r = DM_MAPIO_REMAPPED;
2386         spin_unlock_irqrestore(&pool->lock, flags);
2387
2388         return r;
2389 }
2390
2391 static int maybe_resize_data_dev(struct dm_target *ti, bool *need_commit)
2392 {
2393         int r;
2394         struct pool_c *pt = ti->private;
2395         struct pool *pool = pt->pool;
2396         sector_t data_size = ti->len;
2397         dm_block_t sb_data_size;
2398
2399         *need_commit = false;
2400
2401         (void) sector_div(data_size, pool->sectors_per_block);
2402
2403         r = dm_pool_get_data_dev_size(pool->pmd, &sb_data_size);
2404         if (r) {
2405                 DMERR("%s: failed to retrieve data device size",
2406                       dm_device_name(pool->pool_md));
2407                 return r;
2408         }
2409
2410         if (data_size < sb_data_size) {
2411                 DMERR("%s: pool target (%llu blocks) too small: expected %llu",
2412                       dm_device_name(pool->pool_md),
2413                       (unsigned long long)data_size, sb_data_size);
2414                 return -EINVAL;
2415
2416         } else if (data_size > sb_data_size) {
2417                 if (dm_pool_metadata_needs_check(pool->pmd)) {
2418                         DMERR("%s: unable to grow the data device until repaired.",
2419                               dm_device_name(pool->pool_md));
2420                         return 0;
2421                 }
2422
2423                 if (sb_data_size)
2424                         DMINFO("%s: growing the data device from %llu to %llu blocks",
2425                                dm_device_name(pool->pool_md),
2426                                sb_data_size, (unsigned long long)data_size);
2427                 r = dm_pool_resize_data_dev(pool->pmd, data_size);
2428                 if (r) {
2429                         metadata_operation_failed(pool, "dm_pool_resize_data_dev", r);
2430                         return r;
2431                 }
2432
2433                 *need_commit = true;
2434         }
2435
2436         return 0;
2437 }
2438
2439 static int maybe_resize_metadata_dev(struct dm_target *ti, bool *need_commit)
2440 {
2441         int r;
2442         struct pool_c *pt = ti->private;
2443         struct pool *pool = pt->pool;
2444         dm_block_t metadata_dev_size, sb_metadata_dev_size;
2445
2446         *need_commit = false;
2447
2448         metadata_dev_size = get_metadata_dev_size_in_blocks(pool->md_dev);
2449
2450         r = dm_pool_get_metadata_dev_size(pool->pmd, &sb_metadata_dev_size);
2451         if (r) {
2452                 DMERR("%s: failed to retrieve metadata device size",
2453                       dm_device_name(pool->pool_md));
2454                 return r;
2455         }
2456
2457         if (metadata_dev_size < sb_metadata_dev_size) {
2458                 DMERR("%s: metadata device (%llu blocks) too small: expected %llu",
2459                       dm_device_name(pool->pool_md),
2460                       metadata_dev_size, sb_metadata_dev_size);
2461                 return -EINVAL;
2462
2463         } else if (metadata_dev_size > sb_metadata_dev_size) {
2464                 if (dm_pool_metadata_needs_check(pool->pmd)) {
2465                         DMERR("%s: unable to grow the metadata device until repaired.",
2466                               dm_device_name(pool->pool_md));
2467                         return 0;
2468                 }
2469
2470                 warn_if_metadata_device_too_big(pool->md_dev);
2471                 DMINFO("%s: growing the metadata device from %llu to %llu blocks",
2472                        dm_device_name(pool->pool_md),
2473                        sb_metadata_dev_size, metadata_dev_size);
2474                 r = dm_pool_resize_metadata_dev(pool->pmd, metadata_dev_size);
2475                 if (r) {
2476                         metadata_operation_failed(pool, "dm_pool_resize_metadata_dev", r);
2477                         return r;
2478                 }
2479
2480                 *need_commit = true;
2481         }
2482
2483         return 0;
2484 }
2485
2486 /*
2487  * Retrieves the number of blocks of the data device from
2488  * the superblock and compares it to the actual device size,
2489  * thus resizing the data device in case it has grown.
2490  *
2491  * This both copes with opening preallocated data devices in the ctr
2492  * being followed by a resume
2493  * -and-
2494  * calling the resume method individually after userspace has
2495  * grown the data device in reaction to a table event.
2496  */
2497 static int pool_preresume(struct dm_target *ti)
2498 {
2499         int r;
2500         bool need_commit1, need_commit2;
2501         struct pool_c *pt = ti->private;
2502         struct pool *pool = pt->pool;
2503
2504         /*
2505          * Take control of the pool object.
2506          */
2507         r = bind_control_target(pool, ti);
2508         if (r)
2509                 return r;
2510
2511         r = maybe_resize_data_dev(ti, &need_commit1);
2512         if (r)
2513                 return r;
2514
2515         r = maybe_resize_metadata_dev(ti, &need_commit2);
2516         if (r)
2517                 return r;
2518
2519         if (need_commit1 || need_commit2)
2520                 (void) commit(pool);
2521
2522         return 0;
2523 }
2524
2525 static void pool_resume(struct dm_target *ti)
2526 {
2527         struct pool_c *pt = ti->private;
2528         struct pool *pool = pt->pool;
2529         unsigned long flags;
2530
2531         spin_lock_irqsave(&pool->lock, flags);
2532         pool->low_water_triggered = false;
2533         __requeue_bios(pool);
2534         spin_unlock_irqrestore(&pool->lock, flags);
2535
2536         do_waker(&pool->waker.work);
2537 }
2538
2539 static void pool_postsuspend(struct dm_target *ti)
2540 {
2541         struct pool_c *pt = ti->private;
2542         struct pool *pool = pt->pool;
2543
2544         cancel_delayed_work(&pool->waker);
2545         cancel_delayed_work(&pool->no_space_timeout);
2546         flush_workqueue(pool->wq);
2547         (void) commit(pool);
2548 }
2549
2550 static int check_arg_count(unsigned argc, unsigned args_required)
2551 {
2552         if (argc != args_required) {
2553                 DMWARN("Message received with %u arguments instead of %u.",
2554                        argc, args_required);
2555                 return -EINVAL;
2556         }
2557
2558         return 0;
2559 }
2560
2561 static int read_dev_id(char *arg, dm_thin_id *dev_id, int warning)
2562 {
2563         if (!kstrtoull(arg, 10, (unsigned long long *)dev_id) &&
2564             *dev_id <= MAX_DEV_ID)
2565                 return 0;
2566
2567         if (warning)
2568                 DMWARN("Message received with invalid device id: %s", arg);
2569
2570         return -EINVAL;
2571 }
2572
2573 static int process_create_thin_mesg(unsigned argc, char **argv, struct pool *pool)
2574 {
2575         dm_thin_id dev_id;
2576         int r;
2577
2578         r = check_arg_count(argc, 2);
2579         if (r)
2580                 return r;
2581
2582         r = read_dev_id(argv[1], &dev_id, 1);
2583         if (r)
2584                 return r;
2585
2586         r = dm_pool_create_thin(pool->pmd, dev_id);
2587         if (r) {
2588                 DMWARN("Creation of new thinly-provisioned device with id %s failed.",
2589                        argv[1]);
2590                 return r;
2591         }
2592
2593         return 0;
2594 }
2595
2596 static int process_create_snap_mesg(unsigned argc, char **argv, struct pool *pool)
2597 {
2598         dm_thin_id dev_id;
2599         dm_thin_id origin_dev_id;
2600         int r;
2601
2602         r = check_arg_count(argc, 3);
2603         if (r)
2604                 return r;
2605
2606         r = read_dev_id(argv[1], &dev_id, 1);
2607         if (r)
2608                 return r;
2609
2610         r = read_dev_id(argv[2], &origin_dev_id, 1);
2611         if (r)
2612                 return r;
2613
2614         r = dm_pool_create_snap(pool->pmd, dev_id, origin_dev_id);
2615         if (r) {
2616                 DMWARN("Creation of new snapshot %s of device %s failed.",
2617                        argv[1], argv[2]);
2618                 return r;
2619         }
2620
2621         return 0;
2622 }
2623
2624 static int process_delete_mesg(unsigned argc, char **argv, struct pool *pool)
2625 {
2626         dm_thin_id dev_id;
2627         int r;
2628
2629         r = check_arg_count(argc, 2);
2630         if (r)
2631                 return r;
2632
2633         r = read_dev_id(argv[1], &dev_id, 1);
2634         if (r)
2635                 return r;
2636
2637         r = dm_pool_delete_thin_device(pool->pmd, dev_id);
2638         if (r)
2639                 DMWARN("Deletion of thin device %s failed.", argv[1]);
2640
2641         return r;
2642 }
2643
2644 static int process_set_transaction_id_mesg(unsigned argc, char **argv, struct pool *pool)
2645 {
2646         dm_thin_id old_id, new_id;
2647         int r;
2648
2649         r = check_arg_count(argc, 3);
2650         if (r)
2651                 return r;
2652
2653         if (kstrtoull(argv[1], 10, (unsigned long long *)&old_id)) {
2654                 DMWARN("set_transaction_id message: Unrecognised id %s.", argv[1]);
2655                 return -EINVAL;
2656         }
2657
2658         if (kstrtoull(argv[2], 10, (unsigned long long *)&new_id)) {
2659                 DMWARN("set_transaction_id message: Unrecognised new id %s.", argv[2]);
2660                 return -EINVAL;
2661         }
2662
2663         r = dm_pool_set_metadata_transaction_id(pool->pmd, old_id, new_id);
2664         if (r) {
2665                 DMWARN("Failed to change transaction id from %s to %s.",
2666                        argv[1], argv[2]);
2667                 return r;
2668         }
2669
2670         return 0;
2671 }
2672
2673 static int process_reserve_metadata_snap_mesg(unsigned argc, char **argv, struct pool *pool)
2674 {
2675         int r;
2676
2677         r = check_arg_count(argc, 1);
2678         if (r)
2679                 return r;
2680
2681         (void) commit(pool);
2682
2683         r = dm_pool_reserve_metadata_snap(pool->pmd);
2684         if (r)
2685                 DMWARN("reserve_metadata_snap message failed.");
2686
2687         return r;
2688 }
2689
2690 static int process_release_metadata_snap_mesg(unsigned argc, char **argv, struct pool *pool)
2691 {
2692         int r;
2693
2694         r = check_arg_count(argc, 1);
2695         if (r)
2696                 return r;
2697
2698         r = dm_pool_release_metadata_snap(pool->pmd);
2699         if (r)
2700                 DMWARN("release_metadata_snap message failed.");
2701
2702         return r;
2703 }
2704
2705 /*
2706  * Messages supported:
2707  *   create_thin        <dev_id>
2708  *   create_snap        <dev_id> <origin_id>
2709  *   delete             <dev_id>
2711  *   set_transaction_id <current_trans_id> <new_trans_id>
2712  *   reserve_metadata_snap
2713  *   release_metadata_snap
2714  */
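/*
 * Illustrative usage via dmsetup (device name assumed to be "pool"):
 *
 *   dmsetup message /dev/mapper/pool 0 create_thin 0
 *   dmsetup message /dev/mapper/pool 0 create_snap 1 0
 *   dmsetup message /dev/mapper/pool 0 delete 1
 *
 * A successful message is followed by a metadata commit in
 * pool_message() below.
 */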
2715 static int pool_message(struct dm_target *ti, unsigned argc, char **argv)
2716 {
2717         int r = -EINVAL;
2718         struct pool_c *pt = ti->private;
2719         struct pool *pool = pt->pool;
2720
2721         if (!strcasecmp(argv[0], "create_thin"))
2722                 r = process_create_thin_mesg(argc, argv, pool);
2723
2724         else if (!strcasecmp(argv[0], "create_snap"))
2725                 r = process_create_snap_mesg(argc, argv, pool);
2726
2727         else if (!strcasecmp(argv[0], "delete"))
2728                 r = process_delete_mesg(argc, argv, pool);
2729
2730         else if (!strcasecmp(argv[0], "set_transaction_id"))
2731                 r = process_set_transaction_id_mesg(argc, argv, pool);
2732
2733         else if (!strcasecmp(argv[0], "reserve_metadata_snap"))
2734                 r = process_reserve_metadata_snap_mesg(argc, argv, pool);
2735
2736         else if (!strcasecmp(argv[0], "release_metadata_snap"))
2737                 r = process_release_metadata_snap_mesg(argc, argv, pool);
2738
2739         else
2740                 DMWARN("Unrecognised thin pool target message received: %s", argv[0]);
2741
2742         if (!r)
2743                 (void) commit(pool);
2744
2745         return r;
2746 }
2747
2748 static void emit_flags(struct pool_features *pf, char *result,
2749                        unsigned sz, unsigned maxlen)
2750 {
2751         unsigned count = !pf->zero_new_blocks + !pf->discard_enabled +
2752                 !pf->discard_passdown + (pf->mode == PM_READ_ONLY) +
2753                 pf->error_if_no_space;
2754         DMEMIT("%u ", count);
2755
2756         if (!pf->zero_new_blocks)
2757                 DMEMIT("skip_block_zeroing ");
2758
2759         if (!pf->discard_enabled)
2760                 DMEMIT("ignore_discard ");
2761
2762         if (!pf->discard_passdown)
2763                 DMEMIT("no_discard_passdown ");
2764
2765         if (pf->mode == PM_READ_ONLY)
2766                 DMEMIT("read_only ");
2767
2768         if (pf->error_if_no_space)
2769                 DMEMIT("error_if_no_space ");
2770 }
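
/*
 * For example, a pool created with skip_block_zeroing and
 * error_if_no_space emits "2 skip_block_zeroing error_if_no_space ",
 * while one using only the defaults emits "0 ".
 */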
2771
2772 /*
2773  * Status line is:
2774  *    <transaction id> <used metadata blocks>/<total metadata blocks>
2775  *    <used data blocks>/<total data blocks> <held metadata root>
2776  */
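/*
 * Example STATUSTYPE_INFO output (illustrative values), including the
 * mode, discard and no-space policy fields emitted after the held root:
 *
 *   1 100/4096 950/65536 - rw discard_passdown queue_if_no_space
 */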
2777 static void pool_status(struct dm_target *ti, status_type_t type,
2778                         unsigned status_flags, char *result, unsigned maxlen)
2779 {
2780         int r;
2781         unsigned sz = 0;
2782         uint64_t transaction_id;
2783         dm_block_t nr_free_blocks_data;
2784         dm_block_t nr_free_blocks_metadata;
2785         dm_block_t nr_blocks_data;
2786         dm_block_t nr_blocks_metadata;
2787         dm_block_t held_root;
2788         char buf[BDEVNAME_SIZE];
2789         char buf2[BDEVNAME_SIZE];
2790         struct pool_c *pt = ti->private;
2791         struct pool *pool = pt->pool;
2792
2793         switch (type) {
2794         case STATUSTYPE_INFO:
2795                 if (get_pool_mode(pool) == PM_FAIL) {
2796                         DMEMIT("Fail");
2797                         break;
2798                 }
2799
2800                 /* Commit to ensure statistics aren't out-of-date */
2801                 if (!(status_flags & DM_STATUS_NOFLUSH_FLAG) && !dm_suspended(ti))
2802                         (void) commit(pool);
2803
2804                 r = dm_pool_get_metadata_transaction_id(pool->pmd, &transaction_id);
2805                 if (r) {
2806                         DMERR("%s: dm_pool_get_metadata_transaction_id returned %d",
2807                               dm_device_name(pool->pool_md), r);
2808                         goto err;
2809                 }
2810
2811                 r = dm_pool_get_free_metadata_block_count(pool->pmd, &nr_free_blocks_metadata);
2812                 if (r) {
2813                         DMERR("%s: dm_pool_get_free_metadata_block_count returned %d",
2814                               dm_device_name(pool->pool_md), r);
2815                         goto err;
2816                 }
2817
2818                 r = dm_pool_get_metadata_dev_size(pool->pmd, &nr_blocks_metadata);
2819                 if (r) {
2820                         DMERR("%s: dm_pool_get_metadata_dev_size returned %d",
2821                               dm_device_name(pool->pool_md), r);
2822                         goto err;
2823                 }
2824
2825                 r = dm_pool_get_free_block_count(pool->pmd, &nr_free_blocks_data);
2826                 if (r) {
2827                         DMERR("%s: dm_pool_get_free_block_count returned %d",
2828                               dm_device_name(pool->pool_md), r);
2829                         goto err;
2830                 }
2831
2832                 r = dm_pool_get_data_dev_size(pool->pmd, &nr_blocks_data);
2833                 if (r) {
2834                         DMERR("%s: dm_pool_get_data_dev_size returned %d",
2835                               dm_device_name(pool->pool_md), r);
2836                         goto err;
2837                 }
2838
2839                 r = dm_pool_get_metadata_snap(pool->pmd, &held_root);
2840                 if (r) {
2841                         DMERR("%s: dm_pool_get_metadata_snap returned %d",
2842                               dm_device_name(pool->pool_md), r);
2843                         goto err;
2844                 }
2845
2846                 DMEMIT("%llu %llu/%llu %llu/%llu ",
2847                        (unsigned long long)transaction_id,
2848                        (unsigned long long)(nr_blocks_metadata - nr_free_blocks_metadata),
2849                        (unsigned long long)nr_blocks_metadata,
2850                        (unsigned long long)(nr_blocks_data - nr_free_blocks_data),
2851                        (unsigned long long)nr_blocks_data);
2852
2853                 if (held_root)
2854                         DMEMIT("%llu ", held_root);
2855                 else
2856                         DMEMIT("- ");
2857
2858                 if (pool->pf.mode == PM_OUT_OF_DATA_SPACE)
2859                         DMEMIT("out_of_data_space ");
2860                 else if (pool->pf.mode == PM_READ_ONLY)
2861                         DMEMIT("ro ");
2862                 else
2863                         DMEMIT("rw ");
2864
2865                 if (!pool->pf.discard_enabled)
2866                         DMEMIT("ignore_discard ");
2867                 else if (pool->pf.discard_passdown)
2868                         DMEMIT("discard_passdown ");
2869                 else
2870                         DMEMIT("no_discard_passdown ");
2871
2872                 if (pool->pf.error_if_no_space)
2873                         DMEMIT("error_if_no_space ");
2874                 else
2875                         DMEMIT("queue_if_no_space ");
2876
2877                 break;
2878
2879         case STATUSTYPE_TABLE:
2880                 DMEMIT("%s %s %lu %llu ",
2881                        format_dev_t(buf, pt->metadata_dev->bdev->bd_dev),
2882                        format_dev_t(buf2, pt->data_dev->bdev->bd_dev),
2883                        (unsigned long)pool->sectors_per_block,
2884                        (unsigned long long)pt->low_water_blocks);
2885                 emit_flags(&pt->requested_pf, result, sz, maxlen);
2886                 break;
2887         }
2888         return;
2889
2890 err:
2891         DMEMIT("Error");
2892 }
2893
2894 static int pool_iterate_devices(struct dm_target *ti,
2895                                 iterate_devices_callout_fn fn, void *data)
2896 {
2897         struct pool_c *pt = ti->private;
2898
2899         return fn(ti, pt->data_dev, 0, ti->len, data);
2900 }
2901
2902 static int pool_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
2903                       struct bio_vec *biovec, int max_size)
2904 {
2905         struct pool_c *pt = ti->private;
2906         struct request_queue *q = bdev_get_queue(pt->data_dev->bdev);
2907
2908         if (!q->merge_bvec_fn)
2909                 return max_size;
2910
2911         bvm->bi_bdev = pt->data_dev->bdev;
2912
2913         return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
2914 }
2915
2916 static void set_discard_limits(struct pool_c *pt, struct queue_limits *limits)
2917 {
2918         struct pool *pool = pt->pool;
2919         struct queue_limits *data_limits;
2920
2921         limits->max_discard_sectors = pool->sectors_per_block;
2922
2923         /*
2924          * discard_granularity is just a hint, and not enforced.
2925          */
2926         if (pt->adjusted_pf.discard_passdown) {
2927                 data_limits = &bdev_get_queue(pt->data_dev->bdev)->limits;
2928                 limits->discard_granularity = max(data_limits->discard_granularity,
2929                                                   pool->sectors_per_block << SECTOR_SHIFT);
2930         } else
2931                 limits->discard_granularity = pool->sectors_per_block << SECTOR_SHIFT;
2932 }
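
/*
 * For example, with 128 sector (64KiB) pool blocks this advertises
 * max_discard_sectors = 128 and discard_granularity = 65536 bytes, or
 * the data device's own granularity if that is larger and discard
 * passdown is enabled.
 */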
2933
2934 static void pool_io_hints(struct dm_target *ti, struct queue_limits *limits)
2935 {
2936         struct pool_c *pt = ti->private;
2937         struct pool *pool = pt->pool;
2938         uint64_t io_opt_sectors = limits->io_opt >> SECTOR_SHIFT;
2939
2940         /*
2941          * If the system-determined stacked limits are compatible with the
2942          * pool's blocksize (io_opt is a factor) do not override them.
2943          */
2944         if (io_opt_sectors < pool->sectors_per_block ||
2945             do_div(io_opt_sectors, pool->sectors_per_block)) {
2946                 blk_limits_io_min(limits, 0);
2947                 blk_limits_io_opt(limits, pool->sectors_per_block << SECTOR_SHIFT);
2948         }
2949
2950         /*
2951          * pt->adjusted_pf is a staging area for the actual features to use.
2952          * They get transferred to the live pool in bind_control_target()
2953          * called from pool_preresume().
2954          */
2955         if (!pt->adjusted_pf.discard_enabled) {
2956                 /*
2957                  * Must explicitly disallow stacking discard limits otherwise the
2958                  * block layer will stack them if pool's data device has support.
2959                  * QUEUE_FLAG_DISCARD wouldn't be set but there is no way for the
2960                  * user to see that, so make sure to set all discard limits to 0.
2961                  */
2962                 limits->discard_granularity = 0;
2963                 return;
2964         }
2965
2966         disable_passdown_if_not_supported(pt);
2967
2968         set_discard_limits(pt, limits);
2969 }
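
/*
 * Example of the io_opt handling above (illustrative limits): with 128
 * sector pool blocks, a stacked io_opt of 512 sectors (256KiB) is a
 * whole number of blocks and is left alone, whereas an io_opt of 96
 * sectors is smaller than a block, so io_min is reset to 0 and io_opt
 * to the 64KiB block size.
 */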
2970
2971 static struct target_type pool_target = {
2972         .name = "thin-pool",
2973         .features = DM_TARGET_SINGLETON | DM_TARGET_ALWAYS_WRITEABLE |
2974                     DM_TARGET_IMMUTABLE,
2975         .version = {1, 11, 0},
2976         .module = THIS_MODULE,
2977         .ctr = pool_ctr,
2978         .dtr = pool_dtr,
2979         .map = pool_map,
2980         .postsuspend = pool_postsuspend,
2981         .preresume = pool_preresume,
2982         .resume = pool_resume,
2983         .message = pool_message,
2984         .status = pool_status,
2985         .merge = pool_merge,
2986         .iterate_devices = pool_iterate_devices,
2987         .io_hints = pool_io_hints,
2988 };
2989
2990 /*----------------------------------------------------------------
2991  * Thin target methods
2992  *--------------------------------------------------------------*/
2993 static void thin_dtr(struct dm_target *ti)
2994 {
2995         struct thin_c *tc = ti->private;
2996
2997         mutex_lock(&dm_thin_pool_table.mutex);
2998
2999         __pool_dec(tc->pool);
3000         dm_pool_close_thin_device(tc->td);
3001         dm_put_device(ti, tc->pool_dev);
3002         if (tc->origin_dev)
3003                 dm_put_device(ti, tc->origin_dev);
3004         kfree(tc);
3005
3006         mutex_unlock(&dm_thin_pool_table.mutex);
3007 }
3008
3009 /*
3010  * Thin target parameters:
3011  *
3012  * <pool_dev> <dev_id> [origin_dev]
3013  *
3014  * pool_dev: the path to the pool (eg, /dev/mapper/my_pool)
3015  * dev_id: the internal device identifier
3016  * origin_dev: a device external to the pool that should act as the origin
3017  *
3018  * If the pool device has discards disabled, they get disabled for the thin
3019  * device as well.
3020  */
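/*
 * Illustrative table lines (paths, sizes and ids are examples only):
 *
 *   dmsetup create thin0 --table "0 2097152 thin /dev/mapper/pool 0"
 *   dmsetup create snap1 --table "0 2097152 thin /dev/mapper/pool 1 /dev/vg/origin"
 *
 * The dev_id must already exist, i.e. have been created with a
 * create_thin or create_snap message to the pool target.
 */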
3021 static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv)
3022 {
3023         int r;
3024         struct thin_c *tc;
3025         struct dm_dev *pool_dev, *origin_dev;
3026         struct mapped_device *pool_md;
3027
3028         mutex_lock(&dm_thin_pool_table.mutex);
3029
3030         if (argc != 2 && argc != 3) {
3031                 ti->error = "Invalid argument count";
3032                 r = -EINVAL;
3033                 goto out_unlock;
3034         }
3035
3036         tc = ti->private = kzalloc(sizeof(*tc), GFP_KERNEL);
3037         if (!tc) {
3038                 ti->error = "Out of memory";
3039                 r = -ENOMEM;
3040                 goto out_unlock;
3041         }
3042
3043         if (argc == 3) {
3044                 r = dm_get_device(ti, argv[2], FMODE_READ, &origin_dev);
3045                 if (r) {
3046                         ti->error = "Error opening origin device";
3047                         goto bad_origin_dev;
3048                 }
3049                 tc->origin_dev = origin_dev;
3050         }
3051
3052         r = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &pool_dev);
3053         if (r) {
3054                 ti->error = "Error opening pool device";
3055                 goto bad_pool_dev;
3056         }
3057         tc->pool_dev = pool_dev;
3058
3059         if (read_dev_id(argv[1], (unsigned long long *)&tc->dev_id, 0)) {
3060                 ti->error = "Invalid device id";
3061                 r = -EINVAL;
3062                 goto bad_common;
3063         }
3064
3065         pool_md = dm_get_md(tc->pool_dev->bdev->bd_dev);
3066         if (!pool_md) {
3067                 ti->error = "Couldn't get pool mapped device";
3068                 r = -EINVAL;
3069                 goto bad_common;
3070         }
3071
3072         tc->pool = __pool_table_lookup(pool_md);
3073         if (!tc->pool) {
3074                 ti->error = "Couldn't find pool object";
3075                 r = -EINVAL;
3076                 goto bad_pool_lookup;
3077         }
3078         __pool_inc(tc->pool);
3079
3080         if (get_pool_mode(tc->pool) == PM_FAIL) {
3081                 ti->error = "Couldn't open thin device, Pool is in fail mode";
3082                 r = -EINVAL;
3083                 goto bad_thin_open;
3084         }
3085
        r = dm_pool_open_thin_device(tc->pool->pmd, tc->dev_id, &tc->td);
        if (r) {
                ti->error = "Couldn't open thin internal device";
                goto bad_thin_open;
        }

        r = dm_set_target_max_io_len(ti, tc->pool->sectors_per_block);
        if (r)
                goto bad_target_max_io_len;

        ti->num_flush_bios = 1;
        ti->flush_supported = true;
        ti->per_bio_data_size = sizeof(struct dm_thin_endio_hook);

        /* If the pool supports discards, pass them on. */
        ti->discard_zeroes_data_unsupported = true;
        if (tc->pool->pf.discard_enabled) {
                ti->discards_supported = true;
                ti->num_discard_bios = 1;
                /* Discard bios must be split on a block boundary */
                ti->split_discard_bios = true;
        }

        dm_put(pool_md);

        mutex_unlock(&dm_thin_pool_table.mutex);

        return 0;

bad_target_max_io_len:
        dm_pool_close_thin_device(tc->td);
bad_thin_open:
        __pool_dec(tc->pool);
bad_pool_lookup:
        dm_put(pool_md);
bad_common:
        dm_put_device(ti, tc->pool_dev);
bad_pool_dev:
        if (tc->origin_dev)
                dm_put_device(ti, tc->origin_dev);
bad_origin_dev:
        kfree(tc);
out_unlock:
        mutex_unlock(&dm_thin_pool_table.mutex);

        return r;
}

/*
 * Map a bio by rebasing its sector to be relative to the start of the
 * thin device, then handing it to thin_bio_map() for the actual lookup.
 */
static int thin_map(struct dm_target *ti, struct bio *bio)
{
        bio->bi_iter.bi_sector = dm_target_offset(ti, bio->bi_iter.bi_sector);

        return thin_bio_map(ti, bio);
}

static int thin_endio(struct dm_target *ti, struct bio *bio, int err)
{
        unsigned long flags;
        struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
        struct list_head work;
        struct dm_thin_new_mapping *m, *tmp;
        struct pool *pool = h->tc->pool;

        /*
         * This read was quiescing one or more prepared mappings; now that
         * it has completed, mark them quiesced and queue any that are ready.
         */
        if (h->shared_read_entry) {
                INIT_LIST_HEAD(&work);
                dm_deferred_entry_dec(h->shared_read_entry, &work);

                spin_lock_irqsave(&pool->lock, flags);
                list_for_each_entry_safe(m, tmp, &work, list) {
                        list_del(&m->list);
                        m->quiesced = true;
                        __maybe_add_mapping(m);
                }
                spin_unlock_irqrestore(&pool->lock, flags);
        }

        /*
         * Likewise, completing this bio may release deferred discards;
         * queue them for the worker thread to process.
         */
        if (h->all_io_entry) {
                INIT_LIST_HEAD(&work);
                dm_deferred_entry_dec(h->all_io_entry, &work);
                if (!list_empty(&work)) {
                        spin_lock_irqsave(&pool->lock, flags);
                        list_for_each_entry_safe(m, tmp, &work, list)
                                list_add_tail(&m->list, &pool->prepared_discards);
                        spin_unlock_irqrestore(&pool->lock, flags);
                        wake_worker(pool);
                }
        }

        return 0;
}

static void thin_presuspend(struct dm_target *ti)
{
        struct thin_c *tc = ti->private;

        if (dm_noflush_suspending(ti))
                noflush_work(tc, do_noflush_start);
}

static void thin_postsuspend(struct dm_target *ti)
{
        struct thin_c *tc = ti->private;

        /*
         * The dm_noflush_suspending flag has been cleared by now, so
         * unfortunately we must always run this.
         */
        noflush_work(tc, do_noflush_stop);
}

/*
 * <nr mapped sectors> <highest mapped sector>
 */
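/*
 * Illustrative example (not part of the original source; the numbers are
 * made up): for a fully-provisioned 1GiB thin volume, "dmsetup status"
 * might report something like
 *
 *   0 2097152 thin 2097152 2097151
 *
 * where the last two fields are the values emitted below, and a dash is
 * printed in place of the highest mapped sector while nothing is mapped.
 */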
static void thin_status(struct dm_target *ti, status_type_t type,
                        unsigned status_flags, char *result, unsigned maxlen)
{
        int r;
        ssize_t sz = 0;
        dm_block_t mapped, highest;
        char buf[BDEVNAME_SIZE];
        struct thin_c *tc = ti->private;

        if (get_pool_mode(tc->pool) == PM_FAIL) {
                DMEMIT("Fail");
                return;
        }

        if (!tc->td)
                DMEMIT("-");
        else {
                switch (type) {
                case STATUSTYPE_INFO:
                        r = dm_thin_get_mapped_count(tc->td, &mapped);
                        if (r) {
                                DMERR("dm_thin_get_mapped_count returned %d", r);
                                goto err;
                        }

                        r = dm_thin_get_highest_mapped_block(tc->td, &highest);
                        if (r < 0) {
                                DMERR("dm_thin_get_highest_mapped_block returned %d", r);
                                goto err;
                        }

                        DMEMIT("%llu ", mapped * tc->pool->sectors_per_block);
                        if (r)
                                DMEMIT("%llu", ((highest + 1) *
                                                tc->pool->sectors_per_block) - 1);
                        else
                                DMEMIT("-");
                        break;

                case STATUSTYPE_TABLE:
                        DMEMIT("%s %lu",
                               format_dev_t(buf, tc->pool_dev->bdev->bd_dev),
                               (unsigned long) tc->dev_id);
                        if (tc->origin_dev)
                                DMEMIT(" %s", format_dev_t(buf, tc->origin_dev->bdev->bd_dev));
                        break;
                }
        }

        return;

err:
        DMEMIT("Error");
}

static int thin_iterate_devices(struct dm_target *ti,
                                iterate_devices_callout_fn fn, void *data)
{
        sector_t blocks;
        struct thin_c *tc = ti->private;
        struct pool *pool = tc->pool;

        /*
         * We can't call dm_pool_get_data_dev_size() since that blocks.  So
         * we follow a more convoluted path through to the pool's target.
         */
        if (!pool->ti)
                return 0;       /* nothing is bound */

        blocks = pool->ti->len;
        (void) sector_div(blocks, pool->sectors_per_block);
        if (blocks)
                return fn(ti, tc->pool_dev, 0, pool->sectors_per_block * blocks, data);

        return 0;
}

static struct target_type thin_target = {
        .name = "thin",
        .version = {1, 11, 0},
        .module = THIS_MODULE,
        .ctr = thin_ctr,
        .dtr = thin_dtr,
        .map = thin_map,
        .end_io = thin_endio,
        .presuspend = thin_presuspend,
        .postsuspend = thin_postsuspend,
        .status = thin_status,
        .iterate_devices = thin_iterate_devices,
};

/*----------------------------------------------------------------*/

static int __init dm_thin_init(void)
{
        int r;

        pool_table_init();

        r = dm_register_target(&thin_target);
        if (r)
                return r;

        r = dm_register_target(&pool_target);
        if (r)
                goto bad_pool_target;

        r = -ENOMEM;

        _new_mapping_cache = KMEM_CACHE(dm_thin_new_mapping, 0);
        if (!_new_mapping_cache)
                goto bad_new_mapping_cache;

        return 0;

bad_new_mapping_cache:
        dm_unregister_target(&pool_target);
bad_pool_target:
        dm_unregister_target(&thin_target);

        return r;
}

static void dm_thin_exit(void)
{
        dm_unregister_target(&thin_target);
        dm_unregister_target(&pool_target);

        kmem_cache_destroy(_new_mapping_cache);
}

module_init(dm_thin_init);
module_exit(dm_thin_exit);

module_param_named(no_space_timeout, no_space_timeout_secs, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(no_space_timeout, "Out of data space queue IO timeout in seconds");
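/*
 * Illustrative usage note (an assumption, not from this file): since the
 * parameter above is declared with S_IRUGO | S_IWUSR, it should normally
 * be adjustable at runtime via sysfs, e.g.
 *
 *   echo 120 > /sys/module/dm_thin_pool/parameters/no_space_timeout
 *
 * assuming this code is built as the dm_thin_pool module; the exact path
 * depends on the module name used by the build.
 */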

MODULE_DESCRIPTION(DM_NAME " thin provisioning target");
MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");