dm thin: prefix pool error messages with pool device name
drivers/md/dm-thin.c
1 /*
2  * Copyright (C) 2011-2012 Red Hat UK.
3  *
4  * This file is released under the GPL.
5  */
6
7 #include "dm-thin-metadata.h"
8 #include "dm-bio-prison.h"
9 #include "dm.h"
10
11 #include <linux/device-mapper.h>
12 #include <linux/dm-io.h>
13 #include <linux/dm-kcopyd.h>
14 #include <linux/list.h>
15 #include <linux/init.h>
16 #include <linux/module.h>
17 #include <linux/slab.h>
18
19 #define DM_MSG_PREFIX   "thin"
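/*
 * Note (descriptive, not from the original source): DMERR/DMWARN below emit
 * messages prefixed with "device-mapper: thin: ", and the pool-wide messages
 * additionally include the pool device name via dm_device_name(pool->pool_md)
 * so that output from multiple pools can be told apart.
 */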
20
21 /*
22  * Tunable constants
23  */
24 #define ENDIO_HOOK_POOL_SIZE 1024
25 #define MAPPING_POOL_SIZE 1024
26 #define PRISON_CELLS 1024
27 #define COMMIT_PERIOD HZ
28
29 DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(snapshot_copy_throttle,
30                 "A percentage of time allocated for copy on write");
31
32 /*
33  * The block size of the device holding pool data must be
34  * between 64KB and 1GB.
35  */
36 #define DATA_DEV_BLOCK_SIZE_MIN_SECTORS (64 * 1024 >> SECTOR_SHIFT)
37 #define DATA_DEV_BLOCK_SIZE_MAX_SECTORS (1024 * 1024 * 1024 >> SECTOR_SHIFT)
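/*
 * Worked out with SECTOR_SHIFT == 9: the limits are 64KB >> 9 = 128 sectors
 * and 1GB >> 9 = 2097152 sectors, so sectors_per_block must lie in the
 * range [128, 2097152].
 */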
38
39 /*
40  * Device id is restricted to 24 bits.
41  */
42 #define MAX_DEV_ID ((1 << 24) - 1)
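/* (1 << 24) - 1 == 16777215, i.e. device ids 0..16777215 (about 16M thin devices per pool). */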
43
44 /*
45  * How do we handle breaking sharing of data blocks?
46  * =================================================
47  *
48  * We use a standard copy-on-write btree to store the mappings for the
49  * devices (note I'm talking about copy-on-write of the metadata here, not
50  * the data).  When you take an internal snapshot you clone the root node
51  * of the origin btree.  After this there is no concept of an origin or a
52  * snapshot.  They are just two device trees that happen to point to the
53  * same data blocks.
54  *
55  * When we get a write in we decide if it's to a shared data block using
56  * some timestamp magic.  If it is, we have to break sharing.
57  *
58  * Let's say we write to a shared block in what was the origin.  The
59  * steps are:
60  *
61  * i) plug any further io to this physical block. (see bio_prison code).
62  *
63  * ii) quiesce any read io to that shared data block.  Obviously
64  * including all devices that share this block.  (see dm_deferred_set code)
65  *
66  * iii) copy the data block to a newly allocated block.  This step can be
67  * skipped if the io covers the whole block. (schedule_copy).
68  *
69  * iv) insert the new mapping into the origin's btree
70  * (process_prepared_mapping).  This act of inserting breaks some
71  * sharing of btree nodes between the two devices.  Breaking sharing only
72  * affects the btree of that specific device.  Btrees for the other
73  * devices that share the block never change.  The btree for the origin
74  * device as it was after the last commit is untouched, i.e. we're using
75  * persistent data structures in the functional programming sense.
76  *
77  * v) unplug io to this physical block, including the io that triggered
78  * the breaking of sharing.
79  *
80  * Steps (ii) and (iii) occur in parallel.
81  *
82  * The metadata _doesn't_ need to be committed before the io continues.  We
83  * get away with this because the io is always written to a _new_ block.
84  * If there's a crash, then:
85  *
86  * - The origin mapping will point to the old origin block (the shared
87  * one).  This will contain the data as it was before the io that triggered
88  * the breaking of sharing came in.
89  *
90  * - The snap mapping still points to the old block, just as it would
91  * after the commit.
92  *
93  * The downside of this scheme is the timestamp magic isn't perfect, and
94  * will continue to think that the data block in the snapshot device is shared
95  * even after the write to the origin has broken sharing.  I suspect data
96  * blocks will typically be shared by many different devices, so we're
97  * breaking sharing n + 1 times, rather than n, where n is the number of
98  * devices that reference this data block.  At the moment I think the
99  * benefits far, far outweigh the disadvantages.
100  */
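/*
 * A rough sketch (not part of the original source) of the write-to-shared-block
 * path described above, in terms of the functions in this file; error handling
 * and locking are omitted:
 *
 *   process_bio(tc, bio)
 *     -> dm_thin_find_block() reports the block as shared
 *     -> process_shared_bio() / break_sharing()
 *        -> bio_detain() on the data block              (step i)
 *        -> alloc_data_block()
 *        -> schedule_internal_copy() -> schedule_copy()
 *           -> dm_deferred_set_add_work()               (step ii)
 *           -> dm_kcopyd_copy(), or remap the bio if it
 *              overwrites the whole block               (step iii)
 *   ... later, from the worker:
 *   process_prepared_mapping()
 *     -> dm_thin_insert_block()                         (step iv)
 *     -> cell_defer() / cell_defer_no_holder()          (step v)
 */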
101
102 /*----------------------------------------------------------------*/
103
104 /*
105  * Key building.
106  */
107 static void build_data_key(struct dm_thin_device *td,
108                            dm_block_t b, struct dm_cell_key *key)
109 {
110         key->virtual = 0;
111         key->dev = dm_thin_dev_id(td);
112         key->block = b;
113 }
114
115 static void build_virtual_key(struct dm_thin_device *td, dm_block_t b,
116                               struct dm_cell_key *key)
117 {
118         key->virtual = 1;
119         key->dev = dm_thin_dev_id(td);
120         key->block = b;
121 }
122
123 /*----------------------------------------------------------------*/
124
125 /*
126  * A pool device ties together a metadata device and a data device.  It
127  * also provides the interface for creating and destroying internal
128  * devices.
129  */
130 struct dm_thin_new_mapping;
131
132 /*
133  * The pool runs in 3 modes, ordered from least to most degraded for comparisons.
134  */
135 enum pool_mode {
136         PM_WRITE,               /* metadata may be changed */
137         PM_READ_ONLY,           /* metadata may not be changed */
138         PM_FAIL,                /* all I/O fails */
139 };
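/*
 * Because the modes are declared in order of increasing degradation they can
 * be compared numerically: bind_control_target() below keeps
 * max(old_mode, new_mode), so a degraded pool is never silently upgraded.
 */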
140
141 struct pool_features {
142         enum pool_mode mode;
143
144         bool zero_new_blocks:1;
145         bool discard_enabled:1;
146         bool discard_passdown:1;
147 };
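/*
 * These roughly correspond to the optional feature arguments accepted on the
 * pool target's table line (see Documentation/device-mapper/thin-provisioning.txt),
 * e.g. "skip_block_zeroing", "ignore_discard", "no_discard_passdown", "read_only".
 */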
148
149 struct thin_c;
150 typedef void (*process_bio_fn)(struct thin_c *tc, struct bio *bio);
151 typedef void (*process_mapping_fn)(struct dm_thin_new_mapping *m);
152
153 struct pool {
154         struct list_head list;
155         struct dm_target *ti;   /* Only set if a pool target is bound */
156
157         struct mapped_device *pool_md;
158         struct block_device *md_dev;
159         struct dm_pool_metadata *pmd;
160
161         dm_block_t low_water_blocks;
162         uint32_t sectors_per_block;
163         int sectors_per_block_shift;
164
165         struct pool_features pf;
166         unsigned low_water_triggered:1; /* A dm event has been sent */
167         unsigned no_free_space:1;       /* A -ENOSPC warning has been issued */
168
169         struct dm_bio_prison *prison;
170         struct dm_kcopyd_client *copier;
171
172         struct workqueue_struct *wq;
173         struct work_struct worker;
174         struct delayed_work waker;
175
176         unsigned long last_commit_jiffies;
177         unsigned ref_count;
178
179         spinlock_t lock;
180         struct bio_list deferred_bios;
181         struct bio_list deferred_flush_bios;
182         struct list_head prepared_mappings;
183         struct list_head prepared_discards;
184
185         struct bio_list retry_on_resume_list;
186
187         struct dm_deferred_set *shared_read_ds;
188         struct dm_deferred_set *all_io_ds;
189
190         struct dm_thin_new_mapping *next_mapping;
191         mempool_t *mapping_pool;
192
193         process_bio_fn process_bio;
194         process_bio_fn process_discard;
195
196         process_mapping_fn process_prepared_mapping;
197         process_mapping_fn process_prepared_discard;
198 };
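/*
 * As used in this file, pool->lock guards the deferred/flush bio lists, the
 * prepared mapping and discard lists, the retry_on_resume list and the
 * low_water_triggered/no_free_space flags.
 */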
199
200 static enum pool_mode get_pool_mode(struct pool *pool);
201 static void set_pool_mode(struct pool *pool, enum pool_mode mode);
202
203 /*
204  * Target context for a pool.
205  */
206 struct pool_c {
207         struct dm_target *ti;
208         struct pool *pool;
209         struct dm_dev *data_dev;
210         struct dm_dev *metadata_dev;
211         struct dm_target_callbacks callbacks;
212
213         dm_block_t low_water_blocks;
214         struct pool_features requested_pf; /* Features requested during table load */
215         struct pool_features adjusted_pf;  /* Features used after adjusting for constituent devices */
216 };
217
218 /*
219  * Target context for a thin.
220  */
221 struct thin_c {
222         struct dm_dev *pool_dev;
223         struct dm_dev *origin_dev;
224         dm_thin_id dev_id;
225
226         struct pool *pool;
227         struct dm_thin_device *td;
228 };
229
230 /*----------------------------------------------------------------*/
231
232 /*
233  * wake_worker() is used when new work is queued and when pool_resume is
234  * ready to continue deferred IO processing.
235  */
236 static void wake_worker(struct pool *pool)
237 {
238         queue_work(pool->wq, &pool->worker);
239 }
240
241 /*----------------------------------------------------------------*/
242
243 static int bio_detain(struct pool *pool, struct dm_cell_key *key, struct bio *bio,
244                       struct dm_bio_prison_cell **cell_result)
245 {
246         int r;
247         struct dm_bio_prison_cell *cell_prealloc;
248
249         /*
250          * Allocate a cell from the prison's mempool.
251          * This might block but it can't fail.
252          */
253         cell_prealloc = dm_bio_prison_alloc_cell(pool->prison, GFP_NOIO);
254
255         r = dm_bio_detain(pool->prison, key, bio, cell_prealloc, cell_result);
256         if (r)
257                 /*
258                  * We reused an old cell; we can get rid of
259                  * the new one.
260                  */
261                 dm_bio_prison_free_cell(pool->prison, cell_prealloc);
262
263         return r;
264 }
265
266 static void cell_release(struct pool *pool,
267                          struct dm_bio_prison_cell *cell,
268                          struct bio_list *bios)
269 {
270         dm_cell_release(pool->prison, cell, bios);
271         dm_bio_prison_free_cell(pool->prison, cell);
272 }
273
274 static void cell_release_no_holder(struct pool *pool,
275                                    struct dm_bio_prison_cell *cell,
276                                    struct bio_list *bios)
277 {
278         dm_cell_release_no_holder(pool->prison, cell, bios);
279         dm_bio_prison_free_cell(pool->prison, cell);
280 }
281
282 static void cell_defer_no_holder_no_free(struct thin_c *tc,
283                                          struct dm_bio_prison_cell *cell)
284 {
285         struct pool *pool = tc->pool;
286         unsigned long flags;
287
288         spin_lock_irqsave(&pool->lock, flags);
289         dm_cell_release_no_holder(pool->prison, cell, &pool->deferred_bios);
290         spin_unlock_irqrestore(&pool->lock, flags);
291
292         wake_worker(pool);
293 }
294
295 static void cell_error(struct pool *pool,
296                        struct dm_bio_prison_cell *cell)
297 {
298         dm_cell_error(pool->prison, cell);
299         dm_bio_prison_free_cell(pool->prison, cell);
300 }
301
302 /*----------------------------------------------------------------*/
303
304 /*
305  * A global list of pools that uses a struct mapped_device as a key.
306  */
307 static struct dm_thin_pool_table {
308         struct mutex mutex;
309         struct list_head pools;
310 } dm_thin_pool_table;
311
312 static void pool_table_init(void)
313 {
314         mutex_init(&dm_thin_pool_table.mutex);
315         INIT_LIST_HEAD(&dm_thin_pool_table.pools);
316 }
317
318 static void __pool_table_insert(struct pool *pool)
319 {
320         BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));
321         list_add(&pool->list, &dm_thin_pool_table.pools);
322 }
323
324 static void __pool_table_remove(struct pool *pool)
325 {
326         BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));
327         list_del(&pool->list);
328 }
329
330 static struct pool *__pool_table_lookup(struct mapped_device *md)
331 {
332         struct pool *pool = NULL, *tmp;
333
334         BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));
335
336         list_for_each_entry(tmp, &dm_thin_pool_table.pools, list) {
337                 if (tmp->pool_md == md) {
338                         pool = tmp;
339                         break;
340                 }
341         }
342
343         return pool;
344 }
345
346 static struct pool *__pool_table_lookup_metadata_dev(struct block_device *md_dev)
347 {
348         struct pool *pool = NULL, *tmp;
349
350         BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));
351
352         list_for_each_entry(tmp, &dm_thin_pool_table.pools, list) {
353                 if (tmp->md_dev == md_dev) {
354                         pool = tmp;
355                         break;
356                 }
357         }
358
359         return pool;
360 }
361
362 /*----------------------------------------------------------------*/
363
364 struct dm_thin_endio_hook {
365         struct thin_c *tc;
366         struct dm_deferred_entry *shared_read_entry;
367         struct dm_deferred_entry *all_io_entry;
368         struct dm_thin_new_mapping *overwrite_mapping;
369 };
370
371 static void __requeue_bio_list(struct thin_c *tc, struct bio_list *master)
372 {
373         struct bio *bio;
374         struct bio_list bios;
375
376         bio_list_init(&bios);
377         bio_list_merge(&bios, master);
378         bio_list_init(master);
379
380         while ((bio = bio_list_pop(&bios))) {
381                 struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
382
383                 if (h->tc == tc)
384                         bio_endio(bio, DM_ENDIO_REQUEUE);
385                 else
386                         bio_list_add(master, bio);
387         }
388 }
389
390 static void requeue_io(struct thin_c *tc)
391 {
392         struct pool *pool = tc->pool;
393         unsigned long flags;
394
395         spin_lock_irqsave(&pool->lock, flags);
396         __requeue_bio_list(tc, &pool->deferred_bios);
397         __requeue_bio_list(tc, &pool->retry_on_resume_list);
398         spin_unlock_irqrestore(&pool->lock, flags);
399 }
400
401 /*
402  * This section of code contains the logic for processing a thin device's IO.
403  * Much of the code depends on pool object resources (lists, workqueues, etc)
404  * but most is exclusively called from the thin target rather than the thin-pool
405  * target.
406  */
407
408 static bool block_size_is_power_of_two(struct pool *pool)
409 {
410         return pool->sectors_per_block_shift >= 0;
411 }
412
413 static dm_block_t get_bio_block(struct thin_c *tc, struct bio *bio)
414 {
415         struct pool *pool = tc->pool;
416         sector_t block_nr = bio->bi_sector;
417
418         if (block_size_is_power_of_two(pool))
419                 block_nr >>= pool->sectors_per_block_shift;
420         else
421                 (void) sector_div(block_nr, pool->sectors_per_block);
422
423         return block_nr;
424 }
425
426 static void remap(struct thin_c *tc, struct bio *bio, dm_block_t block)
427 {
428         struct pool *pool = tc->pool;
429         sector_t bi_sector = bio->bi_sector;
430
431         bio->bi_bdev = tc->pool_dev->bdev;
432         if (block_size_is_power_of_two(pool))
433                 bio->bi_sector = (block << pool->sectors_per_block_shift) |
434                                 (bi_sector & (pool->sectors_per_block - 1));
435         else
436                 bio->bi_sector = (block * pool->sectors_per_block) +
437                                  sector_div(bi_sector, pool->sectors_per_block);
438 }
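/*
 * Worked example (illustrative values): with sectors_per_block = 128
 * (shift 7), a bio at sector 1000 lives in virtual block 1000 >> 7 = 7 at
 * offset 1000 & 127 = 104.  If that virtual block is mapped to data block 42,
 * remap() rewrites the bio to sector (42 << 7) | 104 = 5480 on the pool
 * device (which the pool target in turn maps to its data device).
 */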
439
440 static void remap_to_origin(struct thin_c *tc, struct bio *bio)
441 {
442         bio->bi_bdev = tc->origin_dev->bdev;
443 }
444
445 static int bio_triggers_commit(struct thin_c *tc, struct bio *bio)
446 {
447         return (bio->bi_rw & (REQ_FLUSH | REQ_FUA)) &&
448                 dm_thin_changed_this_transaction(tc->td);
449 }
450
451 static void inc_all_io_entry(struct pool *pool, struct bio *bio)
452 {
453         struct dm_thin_endio_hook *h;
454
455         if (bio->bi_rw & REQ_DISCARD)
456                 return;
457
458         h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
459         h->all_io_entry = dm_deferred_entry_inc(pool->all_io_ds);
460 }
461
462 static void issue(struct thin_c *tc, struct bio *bio)
463 {
464         struct pool *pool = tc->pool;
465         unsigned long flags;
466
467         if (!bio_triggers_commit(tc, bio)) {
468                 generic_make_request(bio);
469                 return;
470         }
471
472         /*
473          * Complete bio with an error if earlier I/O caused changes to
474          * the metadata that can't be committed, e.g. due to I/O errors
475          * on the metadata device.
476          */
477         if (dm_thin_aborted_changes(tc->td)) {
478                 bio_io_error(bio);
479                 return;
480         }
481
482         /*
483          * Batch together any bios that trigger commits and then issue a
484          * single commit for them in process_deferred_bios().
485          */
486         spin_lock_irqsave(&pool->lock, flags);
487         bio_list_add(&pool->deferred_flush_bios, bio);
488         spin_unlock_irqrestore(&pool->lock, flags);
489 }
490
491 static void remap_to_origin_and_issue(struct thin_c *tc, struct bio *bio)
492 {
493         remap_to_origin(tc, bio);
494         issue(tc, bio);
495 }
496
497 static void remap_and_issue(struct thin_c *tc, struct bio *bio,
498                             dm_block_t block)
499 {
500         remap(tc, bio, block);
501         issue(tc, bio);
502 }
503
504 /*----------------------------------------------------------------*/
505
506 /*
507  * Bio endio functions.
508  */
509 struct dm_thin_new_mapping {
510         struct list_head list;
511
512         unsigned quiesced:1;
513         unsigned prepared:1;
514         unsigned pass_discard:1;
515
516         struct thin_c *tc;
517         dm_block_t virt_block;
518         dm_block_t data_block;
519         struct dm_bio_prison_cell *cell, *cell2;
520         int err;
521
522         /*
523          * If the bio covers the whole area of a block then we can avoid
524          * zeroing or copying.  Instead this bio is hooked.  The bio will
525          * still be in the cell, so care has to be taken to avoid issuing
526          * the bio twice.
527          */
528         struct bio *bio;
529         bio_end_io_t *saved_bi_end_io;
530 };
531
532 static void __maybe_add_mapping(struct dm_thin_new_mapping *m)
533 {
534         struct pool *pool = m->tc->pool;
535
536         if (m->quiesced && m->prepared) {
537                 list_add(&m->list, &pool->prepared_mappings);
538                 wake_worker(pool);
539         }
540 }
541
542 static void copy_complete(int read_err, unsigned long write_err, void *context)
543 {
544         unsigned long flags;
545         struct dm_thin_new_mapping *m = context;
546         struct pool *pool = m->tc->pool;
547
548         m->err = read_err || write_err ? -EIO : 0;
549
550         spin_lock_irqsave(&pool->lock, flags);
551         m->prepared = 1;
552         __maybe_add_mapping(m);
553         spin_unlock_irqrestore(&pool->lock, flags);
554 }
555
556 static void overwrite_endio(struct bio *bio, int err)
557 {
558         unsigned long flags;
559         struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
560         struct dm_thin_new_mapping *m = h->overwrite_mapping;
561         struct pool *pool = m->tc->pool;
562
563         m->err = err;
564
565         spin_lock_irqsave(&pool->lock, flags);
566         m->prepared = 1;
567         __maybe_add_mapping(m);
568         spin_unlock_irqrestore(&pool->lock, flags);
569 }
570
571 /*----------------------------------------------------------------*/
572
573 /*
574  * Workqueue.
575  */
576
577 /*
578  * Prepared mapping jobs.
579  */
580
581 /*
582  * This sends the bios in the cell back to the deferred_bios list.
583  */
584 static void cell_defer(struct thin_c *tc, struct dm_bio_prison_cell *cell)
585 {
586         struct pool *pool = tc->pool;
587         unsigned long flags;
588
589         spin_lock_irqsave(&pool->lock, flags);
590         cell_release(pool, cell, &pool->deferred_bios);
591         spin_unlock_irqrestore(&pool->lock, flags);
592
593         wake_worker(pool);
594 }
595
596 /*
597  * Same as cell_defer above, except it omits the original holder of the cell.
598  */
599 static void cell_defer_no_holder(struct thin_c *tc, struct dm_bio_prison_cell *cell)
600 {
601         struct pool *pool = tc->pool;
602         unsigned long flags;
603
604         spin_lock_irqsave(&pool->lock, flags);
605         cell_release_no_holder(pool, cell, &pool->deferred_bios);
606         spin_unlock_irqrestore(&pool->lock, flags);
607
608         wake_worker(pool);
609 }
610
611 static void process_prepared_mapping_fail(struct dm_thin_new_mapping *m)
612 {
613         if (m->bio)
614                 m->bio->bi_end_io = m->saved_bi_end_io;
615         cell_error(m->tc->pool, m->cell);
616         list_del(&m->list);
617         mempool_free(m, m->tc->pool->mapping_pool);
618 }
619
620 static void process_prepared_mapping(struct dm_thin_new_mapping *m)
621 {
622         struct thin_c *tc = m->tc;
623         struct pool *pool = tc->pool;
624         struct bio *bio;
625         int r;
626
627         bio = m->bio;
628         if (bio)
629                 bio->bi_end_io = m->saved_bi_end_io;
630
631         if (m->err) {
632                 cell_error(pool, m->cell);
633                 goto out;
634         }
635
636         /*
637          * Commit the prepared block into the mapping btree.
638          * Any I/O for this block arriving after this point will get
639          * remapped to it directly.
640          */
641         r = dm_thin_insert_block(tc->td, m->virt_block, m->data_block);
642         if (r) {
643                 DMERR_LIMIT("dm_thin_insert_block() failed");
644                 cell_error(pool, m->cell);
645                 goto out;
646         }
647
648         /*
649          * Release any bios held while the block was being provisioned.
650          * If we are processing a write bio that completely covers the block,
651          * we already processed it so can ignore it now when processing
652          * the bios in the cell.
653          */
654         if (bio) {
655                 cell_defer_no_holder(tc, m->cell);
656                 bio_endio(bio, 0);
657         } else
658                 cell_defer(tc, m->cell);
659
660 out:
661         list_del(&m->list);
662         mempool_free(m, pool->mapping_pool);
663 }
664
665 static void process_prepared_discard_fail(struct dm_thin_new_mapping *m)
666 {
667         struct thin_c *tc = m->tc;
668
669         bio_io_error(m->bio);
670         cell_defer_no_holder(tc, m->cell);
671         cell_defer_no_holder(tc, m->cell2);
672         mempool_free(m, tc->pool->mapping_pool);
673 }
674
675 static void process_prepared_discard_passdown(struct dm_thin_new_mapping *m)
676 {
677         struct thin_c *tc = m->tc;
678
679         inc_all_io_entry(tc->pool, m->bio);
680         cell_defer_no_holder(tc, m->cell);
681         cell_defer_no_holder(tc, m->cell2);
682
683         if (m->pass_discard)
684                 remap_and_issue(tc, m->bio, m->data_block);
685         else
686                 bio_endio(m->bio, 0);
687
688         mempool_free(m, tc->pool->mapping_pool);
689 }
690
691 static void process_prepared_discard(struct dm_thin_new_mapping *m)
692 {
693         int r;
694         struct thin_c *tc = m->tc;
695
696         r = dm_thin_remove_block(tc->td, m->virt_block);
697         if (r)
698                 DMERR_LIMIT("dm_thin_remove_block() failed");
699
700         process_prepared_discard_passdown(m);
701 }
702
703 static void process_prepared(struct pool *pool, struct list_head *head,
704                              process_mapping_fn *fn)
705 {
706         unsigned long flags;
707         struct list_head maps;
708         struct dm_thin_new_mapping *m, *tmp;
709
710         INIT_LIST_HEAD(&maps);
711         spin_lock_irqsave(&pool->lock, flags);
712         list_splice_init(head, &maps);
713         spin_unlock_irqrestore(&pool->lock, flags);
714
715         list_for_each_entry_safe(m, tmp, &maps, list)
716                 (*fn)(m);
717 }
718
719 /*
720  * Deferred bio jobs.
721  */
722 static int io_overlaps_block(struct pool *pool, struct bio *bio)
723 {
724         return bio->bi_size == (pool->sectors_per_block << SECTOR_SHIFT);
725 }
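/*
 * e.g. with sectors_per_block = 128 the bio must be exactly
 * 128 << SECTOR_SHIFT = 65536 bytes to cover the whole block.
 */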
726
727 static int io_overwrites_block(struct pool *pool, struct bio *bio)
728 {
729         return (bio_data_dir(bio) == WRITE) &&
730                 io_overlaps_block(pool, bio);
731 }
732
733 static void save_and_set_endio(struct bio *bio, bio_end_io_t **save,
734                                bio_end_io_t *fn)
735 {
736         *save = bio->bi_end_io;
737         bio->bi_end_io = fn;
738 }
739
740 static int ensure_next_mapping(struct pool *pool)
741 {
742         if (pool->next_mapping)
743                 return 0;
744
745         pool->next_mapping = mempool_alloc(pool->mapping_pool, GFP_ATOMIC);
746
747         return pool->next_mapping ? 0 : -ENOMEM;
748 }
749
750 static struct dm_thin_new_mapping *get_next_mapping(struct pool *pool)
751 {
752         struct dm_thin_new_mapping *r = pool->next_mapping;
753
754         BUG_ON(!pool->next_mapping);
755
756         pool->next_mapping = NULL;
757
758         return r;
759 }
760
761 static void schedule_copy(struct thin_c *tc, dm_block_t virt_block,
762                           struct dm_dev *origin, dm_block_t data_origin,
763                           dm_block_t data_dest,
764                           struct dm_bio_prison_cell *cell, struct bio *bio)
765 {
766         int r;
767         struct pool *pool = tc->pool;
768         struct dm_thin_new_mapping *m = get_next_mapping(pool);
769
770         INIT_LIST_HEAD(&m->list);
771         m->quiesced = 0;
772         m->prepared = 0;
773         m->tc = tc;
774         m->virt_block = virt_block;
775         m->data_block = data_dest;
776         m->cell = cell;
777         m->err = 0;
778         m->bio = NULL;
779
780         if (!dm_deferred_set_add_work(pool->shared_read_ds, &m->list))
781                 m->quiesced = 1;
782
783         /*
784          * IO to pool_dev remaps to the pool target's data_dev.
785          *
786          * If the whole block of data is being overwritten, we can issue the
787          * bio immediately. Otherwise we use kcopyd to clone the data first.
788          */
789         if (io_overwrites_block(pool, bio)) {
790                 struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
791
792                 h->overwrite_mapping = m;
793                 m->bio = bio;
794                 save_and_set_endio(bio, &m->saved_bi_end_io, overwrite_endio);
795                 inc_all_io_entry(pool, bio);
796                 remap_and_issue(tc, bio, data_dest);
797         } else {
798                 struct dm_io_region from, to;
799
800                 from.bdev = origin->bdev;
801                 from.sector = data_origin * pool->sectors_per_block;
802                 from.count = pool->sectors_per_block;
803
804                 to.bdev = tc->pool_dev->bdev;
805                 to.sector = data_dest * pool->sectors_per_block;
806                 to.count = pool->sectors_per_block;
807
808                 r = dm_kcopyd_copy(pool->copier, &from, 1, &to,
809                                    0, copy_complete, m);
810                 if (r < 0) {
811                         mempool_free(m, pool->mapping_pool);
812                         DMERR_LIMIT("dm_kcopyd_copy() failed");
813                         cell_error(pool, cell);
814                 }
815         }
816 }
817
818 static void schedule_internal_copy(struct thin_c *tc, dm_block_t virt_block,
819                                    dm_block_t data_origin, dm_block_t data_dest,
820                                    struct dm_bio_prison_cell *cell, struct bio *bio)
821 {
822         schedule_copy(tc, virt_block, tc->pool_dev,
823                       data_origin, data_dest, cell, bio);
824 }
825
826 static void schedule_external_copy(struct thin_c *tc, dm_block_t virt_block,
827                                    dm_block_t data_dest,
828                                    struct dm_bio_prison_cell *cell, struct bio *bio)
829 {
830         schedule_copy(tc, virt_block, tc->origin_dev,
831                       virt_block, data_dest, cell, bio);
832 }
833
834 static void schedule_zero(struct thin_c *tc, dm_block_t virt_block,
835                           dm_block_t data_block, struct dm_bio_prison_cell *cell,
836                           struct bio *bio)
837 {
838         struct pool *pool = tc->pool;
839         struct dm_thin_new_mapping *m = get_next_mapping(pool);
840
841         INIT_LIST_HEAD(&m->list);
842         m->quiesced = 1;
843         m->prepared = 0;
844         m->tc = tc;
845         m->virt_block = virt_block;
846         m->data_block = data_block;
847         m->cell = cell;
848         m->err = 0;
849         m->bio = NULL;
850
851         /*
852          * If the whole block of data is being overwritten or we are not
853          * zeroing pre-existing data, we can issue the bio immediately.
854          * Otherwise we use kcopyd to zero the data first.
855          */
856         if (!pool->pf.zero_new_blocks)
857                 process_prepared_mapping(m);
858
859         else if (io_overwrites_block(pool, bio)) {
860                 struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
861
862                 h->overwrite_mapping = m;
863                 m->bio = bio;
864                 save_and_set_endio(bio, &m->saved_bi_end_io, overwrite_endio);
865                 inc_all_io_entry(pool, bio);
866                 remap_and_issue(tc, bio, data_block);
867         } else {
868                 int r;
869                 struct dm_io_region to;
870
871                 to.bdev = tc->pool_dev->bdev;
872                 to.sector = data_block * pool->sectors_per_block;
873                 to.count = pool->sectors_per_block;
874
875                 r = dm_kcopyd_zero(pool->copier, 1, &to, 0, copy_complete, m);
876                 if (r < 0) {
877                         mempool_free(m, pool->mapping_pool);
878                         DMERR_LIMIT("dm_kcopyd_zero() failed");
879                         cell_error(pool, cell);
880                 }
881         }
882 }
883
884 static int commit(struct pool *pool)
885 {
886         int r;
887
888         r = dm_pool_commit_metadata(pool->pmd);
889         if (r)
890                 DMERR_LIMIT("%s: commit failed: error = %d",
891                             dm_device_name(pool->pool_md), r);
892
893         return r;
894 }
895
896 /*
897  * A non-zero return indicates read_only or fail_io mode.
898  * Many callers don't care about the return value.
899  */
900 static int commit_or_fallback(struct pool *pool)
901 {
902         int r;
903
904         if (get_pool_mode(pool) != PM_WRITE)
905                 return -EINVAL;
906
907         r = commit(pool);
908         if (r)
909                 set_pool_mode(pool, PM_READ_ONLY);
910
911         return r;
912 }
913
914 static int alloc_data_block(struct thin_c *tc, dm_block_t *result)
915 {
916         int r;
917         dm_block_t free_blocks;
918         unsigned long flags;
919         struct pool *pool = tc->pool;
920
921         r = dm_pool_get_free_block_count(pool->pmd, &free_blocks);
922         if (r)
923                 return r;
924
925         if (free_blocks <= pool->low_water_blocks && !pool->low_water_triggered) {
926                 DMWARN("%s: reached low water mark for data device: sending event.",
927                        dm_device_name(pool->pool_md));
928                 spin_lock_irqsave(&pool->lock, flags);
929                 pool->low_water_triggered = 1;
930                 spin_unlock_irqrestore(&pool->lock, flags);
931                 dm_table_event(pool->ti->table);
932         }
933
934         if (!free_blocks) {
935                 if (pool->no_free_space)
936                         return -ENOSPC;
937                 else {
938                         /*
939                          * Try to commit to see if that will free up some
940                          * more space.
941                          */
942                         (void) commit_or_fallback(pool);
943
944                         r = dm_pool_get_free_block_count(pool->pmd, &free_blocks);
945                         if (r)
946                                 return r;
947
948                         /*
949                          * If we still have no space we set a flag to avoid
950                          * doing all this checking and return -ENOSPC.
951                          */
952                         if (!free_blocks) {
953                                 DMWARN("%s: no free space available.",
954                                        dm_device_name(pool->pool_md));
955                                 spin_lock_irqsave(&pool->lock, flags);
956                                 pool->no_free_space = 1;
957                                 spin_unlock_irqrestore(&pool->lock, flags);
958                                 return -ENOSPC;
959                         }
960                 }
961         }
962
963         r = dm_pool_alloc_data_block(pool->pmd, result);
964         if (r)
965                 return r;
966
967         return 0;
968 }
969
970 /*
971  * If we have run out of space, queue bios until the device is
972  * resumed, presumably after having been reloaded with more space.
973  */
974 static void retry_on_resume(struct bio *bio)
975 {
976         struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
977         struct thin_c *tc = h->tc;
978         struct pool *pool = tc->pool;
979         unsigned long flags;
980
981         spin_lock_irqsave(&pool->lock, flags);
982         bio_list_add(&pool->retry_on_resume_list, bio);
983         spin_unlock_irqrestore(&pool->lock, flags);
984 }
985
986 static void no_space(struct pool *pool, struct dm_bio_prison_cell *cell)
987 {
988         struct bio *bio;
989         struct bio_list bios;
990
991         bio_list_init(&bios);
992         cell_release(pool, cell, &bios);
993
994         while ((bio = bio_list_pop(&bios)))
995                 retry_on_resume(bio);
996 }
997
998 static void process_discard(struct thin_c *tc, struct bio *bio)
999 {
1000         int r;
1001         unsigned long flags;
1002         struct pool *pool = tc->pool;
1003         struct dm_bio_prison_cell *cell, *cell2;
1004         struct dm_cell_key key, key2;
1005         dm_block_t block = get_bio_block(tc, bio);
1006         struct dm_thin_lookup_result lookup_result;
1007         struct dm_thin_new_mapping *m;
1008
1009         build_virtual_key(tc->td, block, &key);
1010         if (bio_detain(tc->pool, &key, bio, &cell))
1011                 return;
1012
1013         r = dm_thin_find_block(tc->td, block, 1, &lookup_result);
1014         switch (r) {
1015         case 0:
1016                 /*
1017                  * Check nobody is fiddling with this pool block.  This can
1018                  * happen if someone's in the process of breaking sharing
1019                  * on this block.
1020                  */
1021                 build_data_key(tc->td, lookup_result.block, &key2);
1022                 if (bio_detain(tc->pool, &key2, bio, &cell2)) {
1023                         cell_defer_no_holder(tc, cell);
1024                         break;
1025                 }
1026
1027                 if (io_overlaps_block(pool, bio)) {
1028                         /*
1029                          * IO may still be going to the destination block.  We must
1030                          * quiesce before we can do the removal.
1031                          */
1032                         m = get_next_mapping(pool);
1033                         m->tc = tc;
1034                         m->pass_discard = (!lookup_result.shared) && pool->pf.discard_passdown;
1035                         m->virt_block = block;
1036                         m->data_block = lookup_result.block;
1037                         m->cell = cell;
1038                         m->cell2 = cell2;
1039                         m->err = 0;
1040                         m->bio = bio;
1041
1042                         if (!dm_deferred_set_add_work(pool->all_io_ds, &m->list)) {
1043                                 spin_lock_irqsave(&pool->lock, flags);
1044                                 list_add(&m->list, &pool->prepared_discards);
1045                                 spin_unlock_irqrestore(&pool->lock, flags);
1046                                 wake_worker(pool);
1047                         }
1048                 } else {
1049                         inc_all_io_entry(pool, bio);
1050                         cell_defer_no_holder(tc, cell);
1051                         cell_defer_no_holder(tc, cell2);
1052
1053                         /*
1054                          * The DM core makes sure that the discard doesn't span
1055                          * a block boundary.  So we submit the discard of a
1056                          * partial block appropriately.
1057                          */
1058                         if ((!lookup_result.shared) && pool->pf.discard_passdown)
1059                                 remap_and_issue(tc, bio, lookup_result.block);
1060                         else
1061                                 bio_endio(bio, 0);
1062                 }
1063                 break;
1064
1065         case -ENODATA:
1066                 /*
1067                  * It isn't provisioned, just forget it.
1068                  */
1069                 cell_defer_no_holder(tc, cell);
1070                 bio_endio(bio, 0);
1071                 break;
1072
1073         default:
1074                 DMERR_LIMIT("%s: dm_thin_find_block() failed: error = %d",
1075                             __func__, r);
1076                 cell_defer_no_holder(tc, cell);
1077                 bio_io_error(bio);
1078                 break;
1079         }
1080 }
1081
1082 static void break_sharing(struct thin_c *tc, struct bio *bio, dm_block_t block,
1083                           struct dm_cell_key *key,
1084                           struct dm_thin_lookup_result *lookup_result,
1085                           struct dm_bio_prison_cell *cell)
1086 {
1087         int r;
1088         dm_block_t data_block;
1089
1090         r = alloc_data_block(tc, &data_block);
1091         switch (r) {
1092         case 0:
1093                 schedule_internal_copy(tc, block, lookup_result->block,
1094                                        data_block, cell, bio);
1095                 break;
1096
1097         case -ENOSPC:
1098                 no_space(tc->pool, cell);
1099                 break;
1100
1101         default:
1102                 DMERR_LIMIT("%s: alloc_data_block() failed: error = %d",
1103                             __func__, r);
1104                 cell_error(tc->pool, cell);
1105                 break;
1106         }
1107 }
1108
1109 static void process_shared_bio(struct thin_c *tc, struct bio *bio,
1110                                dm_block_t block,
1111                                struct dm_thin_lookup_result *lookup_result)
1112 {
1113         struct dm_bio_prison_cell *cell;
1114         struct pool *pool = tc->pool;
1115         struct dm_cell_key key;
1116
1117         /*
1118          * If cell is already occupied, then sharing is already in the process
1119          * of being broken so we have nothing further to do here.
1120          */
1121         build_data_key(tc->td, lookup_result->block, &key);
1122         if (bio_detain(pool, &key, bio, &cell))
1123                 return;
1124
1125         if (bio_data_dir(bio) == WRITE && bio->bi_size)
1126                 break_sharing(tc, bio, block, &key, lookup_result, cell);
1127         else {
1128                 struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
1129
1130                 h->shared_read_entry = dm_deferred_entry_inc(pool->shared_read_ds);
1131                 inc_all_io_entry(pool, bio);
1132                 cell_defer_no_holder(tc, cell);
1133
1134                 remap_and_issue(tc, bio, lookup_result->block);
1135         }
1136 }
1137
1138 static void provision_block(struct thin_c *tc, struct bio *bio, dm_block_t block,
1139                             struct dm_bio_prison_cell *cell)
1140 {
1141         int r;
1142         dm_block_t data_block;
1143         struct pool *pool = tc->pool;
1144
1145         /*
1146          * Remap empty bios (flushes) immediately, without provisioning.
1147          */
1148         if (!bio->bi_size) {
1149                 inc_all_io_entry(pool, bio);
1150                 cell_defer_no_holder(tc, cell);
1151
1152                 remap_and_issue(tc, bio, 0);
1153                 return;
1154         }
1155
1156         /*
1157          * Fill read bios with zeroes and complete them immediately.
1158          */
1159         if (bio_data_dir(bio) == READ) {
1160                 zero_fill_bio(bio);
1161                 cell_defer_no_holder(tc, cell);
1162                 bio_endio(bio, 0);
1163                 return;
1164         }
1165
1166         r = alloc_data_block(tc, &data_block);
1167         switch (r) {
1168         case 0:
1169                 if (tc->origin_dev)
1170                         schedule_external_copy(tc, block, data_block, cell, bio);
1171                 else
1172                         schedule_zero(tc, block, data_block, cell, bio);
1173                 break;
1174
1175         case -ENOSPC:
1176                 no_space(pool, cell);
1177                 break;
1178
1179         default:
1180                 DMERR_LIMIT("%s: alloc_data_block() failed: error = %d",
1181                             __func__, r);
1182                 set_pool_mode(pool, PM_READ_ONLY);
1183                 cell_error(pool, cell);
1184                 break;
1185         }
1186 }
1187
1188 static void process_bio(struct thin_c *tc, struct bio *bio)
1189 {
1190         int r;
1191         struct pool *pool = tc->pool;
1192         dm_block_t block = get_bio_block(tc, bio);
1193         struct dm_bio_prison_cell *cell;
1194         struct dm_cell_key key;
1195         struct dm_thin_lookup_result lookup_result;
1196
1197         /*
1198          * If cell is already occupied, then the block is already
1199          * being provisioned so we have nothing further to do here.
1200          */
1201         build_virtual_key(tc->td, block, &key);
1202         if (bio_detain(pool, &key, bio, &cell))
1203                 return;
1204
1205         r = dm_thin_find_block(tc->td, block, 1, &lookup_result);
1206         switch (r) {
1207         case 0:
1208                 if (lookup_result.shared) {
1209                         process_shared_bio(tc, bio, block, &lookup_result);
1210                         cell_defer_no_holder(tc, cell); /* FIXME: pass this cell into process_shared? */
1211                 } else {
1212                         inc_all_io_entry(pool, bio);
1213                         cell_defer_no_holder(tc, cell);
1214
1215                         remap_and_issue(tc, bio, lookup_result.block);
1216                 }
1217                 break;
1218
1219         case -ENODATA:
1220                 if (bio_data_dir(bio) == READ && tc->origin_dev) {
1221                         inc_all_io_entry(pool, bio);
1222                         cell_defer_no_holder(tc, cell);
1223
1224                         remap_to_origin_and_issue(tc, bio);
1225                 } else
1226                         provision_block(tc, bio, block, cell);
1227                 break;
1228
1229         default:
1230                 DMERR_LIMIT("%s: dm_thin_find_block() failed: error = %d",
1231                             __func__, r);
1232                 cell_defer_no_holder(tc, cell);
1233                 bio_io_error(bio);
1234                 break;
1235         }
1236 }
1237
1238 static void process_bio_read_only(struct thin_c *tc, struct bio *bio)
1239 {
1240         int r;
1241         int rw = bio_data_dir(bio);
1242         dm_block_t block = get_bio_block(tc, bio);
1243         struct dm_thin_lookup_result lookup_result;
1244
1245         r = dm_thin_find_block(tc->td, block, 1, &lookup_result);
1246         switch (r) {
1247         case 0:
1248                 if (lookup_result.shared && (rw == WRITE) && bio->bi_size)
1249                         bio_io_error(bio);
1250                 else {
1251                         inc_all_io_entry(tc->pool, bio);
1252                         remap_and_issue(tc, bio, lookup_result.block);
1253                 }
1254                 break;
1255
1256         case -ENODATA:
1257                 if (rw != READ) {
1258                         bio_io_error(bio);
1259                         break;
1260                 }
1261
1262                 if (tc->origin_dev) {
1263                         inc_all_io_entry(tc->pool, bio);
1264                         remap_to_origin_and_issue(tc, bio);
1265                         break;
1266                 }
1267
1268                 zero_fill_bio(bio);
1269                 bio_endio(bio, 0);
1270                 break;
1271
1272         default:
1273                 DMERR_LIMIT("%s: dm_thin_find_block() failed: error = %d",
1274                             __func__, r);
1275                 bio_io_error(bio);
1276                 break;
1277         }
1278 }
1279
1280 static void process_bio_fail(struct thin_c *tc, struct bio *bio)
1281 {
1282         bio_io_error(bio);
1283 }
1284
1285 /*
1286  * FIXME: should we also commit due to size of transaction, measured in
1287  * metadata blocks?
1288  */
1289 static int need_commit_due_to_time(struct pool *pool)
1290 {
1291         return jiffies < pool->last_commit_jiffies ||
1292                jiffies > pool->last_commit_jiffies + COMMIT_PERIOD;
1293 }
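/*
 * COMMIT_PERIOD is HZ, so this asks for a commit roughly once a second.
 * The first comparison also forces a commit if jiffies has wrapped since
 * the last commit.
 */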
1294
1295 static void process_deferred_bios(struct pool *pool)
1296 {
1297         unsigned long flags;
1298         struct bio *bio;
1299         struct bio_list bios;
1300
1301         bio_list_init(&bios);
1302
1303         spin_lock_irqsave(&pool->lock, flags);
1304         bio_list_merge(&bios, &pool->deferred_bios);
1305         bio_list_init(&pool->deferred_bios);
1306         spin_unlock_irqrestore(&pool->lock, flags);
1307
1308         while ((bio = bio_list_pop(&bios))) {
1309                 struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
1310                 struct thin_c *tc = h->tc;
1311
1312                 /*
1313                  * If we've got no free new_mapping structs, and processing
1314                  * this bio might require one, we pause until there are some
1315                  * prepared mappings to process.
1316                  */
1317                 if (ensure_next_mapping(pool)) {
1318                         spin_lock_irqsave(&pool->lock, flags);
1319                         bio_list_merge(&pool->deferred_bios, &bios);
1320                         spin_unlock_irqrestore(&pool->lock, flags);
1321
1322                         break;
1323                 }
1324
1325                 if (bio->bi_rw & REQ_DISCARD)
1326                         pool->process_discard(tc, bio);
1327                 else
1328                         pool->process_bio(tc, bio);
1329         }
1330
1331         /*
1332          * If there are any deferred flush bios, we must commit
1333          * the metadata before issuing them.
1334          */
1335         bio_list_init(&bios);
1336         spin_lock_irqsave(&pool->lock, flags);
1337         bio_list_merge(&bios, &pool->deferred_flush_bios);
1338         bio_list_init(&pool->deferred_flush_bios);
1339         spin_unlock_irqrestore(&pool->lock, flags);
1340
1341         if (bio_list_empty(&bios) && !need_commit_due_to_time(pool))
1342                 return;
1343
1344         if (commit_or_fallback(pool)) {
1345                 while ((bio = bio_list_pop(&bios)))
1346                         bio_io_error(bio);
1347                 return;
1348         }
1349         pool->last_commit_jiffies = jiffies;
1350
1351         while ((bio = bio_list_pop(&bios)))
1352                 generic_make_request(bio);
1353 }
1354
1355 static void do_worker(struct work_struct *ws)
1356 {
1357         struct pool *pool = container_of(ws, struct pool, worker);
1358
1359         process_prepared(pool, &pool->prepared_mappings, &pool->process_prepared_mapping);
1360         process_prepared(pool, &pool->prepared_discards, &pool->process_prepared_discard);
1361         process_deferred_bios(pool);
1362 }
1363
1364 /*
1365  * We want to commit periodically so that not too much
1366  * unwritten data builds up.
1367  */
1368 static void do_waker(struct work_struct *ws)
1369 {
1370         struct pool *pool = container_of(to_delayed_work(ws), struct pool, waker);
1371         wake_worker(pool);
1372         queue_delayed_work(pool->wq, &pool->waker, COMMIT_PERIOD);
1373 }
1374
1375 /*----------------------------------------------------------------*/
1376
1377 static enum pool_mode get_pool_mode(struct pool *pool)
1378 {
1379         return pool->pf.mode;
1380 }
1381
1382 static void set_pool_mode(struct pool *pool, enum pool_mode mode)
1383 {
1384         int r;
1385
1386         pool->pf.mode = mode;
1387
1388         switch (mode) {
1389         case PM_FAIL:
1390                 DMERR("%s: switching pool to failure mode",
1391                       dm_device_name(pool->pool_md));
1392                 pool->process_bio = process_bio_fail;
1393                 pool->process_discard = process_bio_fail;
1394                 pool->process_prepared_mapping = process_prepared_mapping_fail;
1395                 pool->process_prepared_discard = process_prepared_discard_fail;
1396                 break;
1397
1398         case PM_READ_ONLY:
1399                 DMERR("%s: switching pool to read-only mode",
1400                       dm_device_name(pool->pool_md));
1401                 r = dm_pool_abort_metadata(pool->pmd);
1402                 if (r) {
1403                         DMERR("%s: aborting transaction failed",
1404                               dm_device_name(pool->pool_md));
1405                         set_pool_mode(pool, PM_FAIL);
1406                 } else {
1407                         dm_pool_metadata_read_only(pool->pmd);
1408                         pool->process_bio = process_bio_read_only;
1409                         pool->process_discard = process_discard;
1410                         pool->process_prepared_mapping = process_prepared_mapping_fail;
1411                         pool->process_prepared_discard = process_prepared_discard_passdown;
1412                 }
1413                 break;
1414
1415         case PM_WRITE:
1416                 pool->process_bio = process_bio;
1417                 pool->process_discard = process_discard;
1418                 pool->process_prepared_mapping = process_prepared_mapping;
1419                 pool->process_prepared_discard = process_prepared_discard;
1420                 break;
1421         }
1422 }
1423
1424 /*----------------------------------------------------------------*/
1425
1426 /*
1427  * Mapping functions.
1428  */
1429
1430 /*
1431  * Called only while mapping a thin bio to hand it over to the workqueue.
1432  */
1433 static void thin_defer_bio(struct thin_c *tc, struct bio *bio)
1434 {
1435         unsigned long flags;
1436         struct pool *pool = tc->pool;
1437
1438         spin_lock_irqsave(&pool->lock, flags);
1439         bio_list_add(&pool->deferred_bios, bio);
1440         spin_unlock_irqrestore(&pool->lock, flags);
1441
1442         wake_worker(pool);
1443 }
1444
1445 static void thin_hook_bio(struct thin_c *tc, struct bio *bio)
1446 {
1447         struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
1448
1449         h->tc = tc;
1450         h->shared_read_entry = NULL;
1451         h->all_io_entry = NULL;
1452         h->overwrite_mapping = NULL;
1453 }
1454
1455 /*
1456  * Non-blocking function called from the thin target's map function.
1457  */
1458 static int thin_bio_map(struct dm_target *ti, struct bio *bio)
1459 {
1460         int r;
1461         struct thin_c *tc = ti->private;
1462         dm_block_t block = get_bio_block(tc, bio);
1463         struct dm_thin_device *td = tc->td;
1464         struct dm_thin_lookup_result result;
1465         struct dm_bio_prison_cell cell1, cell2;
1466         struct dm_bio_prison_cell *cell_result;
1467         struct dm_cell_key key;
1468
1469         thin_hook_bio(tc, bio);
1470
1471         if (get_pool_mode(tc->pool) == PM_FAIL) {
1472                 bio_io_error(bio);
1473                 return DM_MAPIO_SUBMITTED;
1474         }
1475
1476         if (bio->bi_rw & (REQ_DISCARD | REQ_FLUSH | REQ_FUA)) {
1477                 thin_defer_bio(tc, bio);
1478                 return DM_MAPIO_SUBMITTED;
1479         }
1480
1481         r = dm_thin_find_block(td, block, 0, &result);
1482
1483         /*
1484          * Note that we defer readahead too.
1485          */
1486         switch (r) {
1487         case 0:
1488                 if (unlikely(result.shared)) {
1489                         /*
1490                          * We have a race condition here between the
1491                          * result.shared value returned by the lookup and
1492                          * snapshot creation, which may cause new
1493                          * sharing.
1494                          *
1495                          * To avoid this always quiesce the origin before
1496                          * taking the snap.  You want to do this anyway to
1497                          * ensure a consistent application view
1498                          * (i.e. lockfs).
1499                          *
1500                          * More distant ancestors are irrelevant. The
1501                          * shared flag will be set in their case.
1502                          */
1503                         thin_defer_bio(tc, bio);
1504                         return DM_MAPIO_SUBMITTED;
1505                 }
1506
1507                 build_virtual_key(tc->td, block, &key);
1508                 if (dm_bio_detain(tc->pool->prison, &key, bio, &cell1, &cell_result))
1509                         return DM_MAPIO_SUBMITTED;
1510
1511                 build_data_key(tc->td, result.block, &key);
1512                 if (dm_bio_detain(tc->pool->prison, &key, bio, &cell2, &cell_result)) {
1513                         cell_defer_no_holder_no_free(tc, &cell1);
1514                         return DM_MAPIO_SUBMITTED;
1515                 }
1516
1517                 inc_all_io_entry(tc->pool, bio);
1518                 cell_defer_no_holder_no_free(tc, &cell2);
1519                 cell_defer_no_holder_no_free(tc, &cell1);
1520
1521                 remap(tc, bio, result.block);
1522                 return DM_MAPIO_REMAPPED;
1523
1524         case -ENODATA:
1525                 if (get_pool_mode(tc->pool) == PM_READ_ONLY) {
1526                         /*
1527                          * This block isn't provisioned, and we have no way
1528                          * of doing so.  Just error it.
1529                          */
1530                         bio_io_error(bio);
1531                         return DM_MAPIO_SUBMITTED;
1532                 }
1533                 /* fall through */
1534
1535         case -EWOULDBLOCK:
1536                 /*
1537                  * In future, the failed dm_thin_find_block above could
1538                  * provide the hint to load the metadata into cache.
1539                  */
1540                 thin_defer_bio(tc, bio);
1541                 return DM_MAPIO_SUBMITTED;
1542
1543         default:
1544                 /*
1545                  * Must always call bio_io_error on failure.
1546                  * dm_thin_find_block can fail with -EINVAL if the
1547                  * pool is switched to fail-io mode.
1548                  */
1549                 bio_io_error(bio);
1550                 return DM_MAPIO_SUBMITTED;
1551         }
1552 }
1553
1554 static int pool_is_congested(struct dm_target_callbacks *cb, int bdi_bits)
1555 {
1556         int r;
1557         unsigned long flags;
1558         struct pool_c *pt = container_of(cb, struct pool_c, callbacks);
1559
1560         spin_lock_irqsave(&pt->pool->lock, flags);
1561         r = !bio_list_empty(&pt->pool->retry_on_resume_list);
1562         spin_unlock_irqrestore(&pt->pool->lock, flags);
1563
1564         if (!r) {
1565                 struct request_queue *q = bdev_get_queue(pt->data_dev->bdev);
1566                 r = bdi_congested(&q->backing_dev_info, bdi_bits);
1567         }
1568
1569         return r;
1570 }
1571
1572 static void __requeue_bios(struct pool *pool)
1573 {
1574         bio_list_merge(&pool->deferred_bios, &pool->retry_on_resume_list);
1575         bio_list_init(&pool->retry_on_resume_list);
1576 }
1577
1578 /*----------------------------------------------------------------
1579  * Binding of control targets to a pool object
1580  *--------------------------------------------------------------*/
1581 static bool data_dev_supports_discard(struct pool_c *pt)
1582 {
1583         struct request_queue *q = bdev_get_queue(pt->data_dev->bdev);
1584
1585         return q && blk_queue_discard(q);
1586 }
1587
1588 static bool is_factor(sector_t block_size, uint32_t n)
1589 {
1590         return !sector_div(block_size, n);
1591 }
1592
1593 /*
1594  * If discard_passdown was enabled, verify that the data device
1595  * supports discards.  Disable discard_passdown if not.
1596  */
1597 static void disable_passdown_if_not_supported(struct pool_c *pt)
1598 {
1599         struct pool *pool = pt->pool;
1600         struct block_device *data_bdev = pt->data_dev->bdev;
1601         struct queue_limits *data_limits = &bdev_get_queue(data_bdev)->limits;
1602         sector_t block_size = pool->sectors_per_block << SECTOR_SHIFT;
1603         const char *reason = NULL;
1604         char buf[BDEVNAME_SIZE];
1605
1606         if (!pt->adjusted_pf.discard_passdown)
1607                 return;
1608
1609         if (!data_dev_supports_discard(pt))
1610                 reason = "discard unsupported";
1611
1612         else if (data_limits->max_discard_sectors < pool->sectors_per_block)
1613                 reason = "max discard sectors smaller than a block";
1614
1615         else if (data_limits->discard_granularity > block_size)
1616                 reason = "discard granularity larger than a block";
1617
1618         else if (!is_factor(block_size, data_limits->discard_granularity))
1619                 reason = "discard granularity not a factor of block size";
1620
1621         if (reason) {
1622                 DMWARN("Data device (%s) %s: Disabling discard passdown.", bdevname(data_bdev, buf), reason);
1623                 pt->adjusted_pf.discard_passdown = false;
1624         }
1625 }
1626
1627 static int bind_control_target(struct pool *pool, struct dm_target *ti)
1628 {
1629         struct pool_c *pt = ti->private;
1630
1631         /*
1632          * We want to make sure that degraded pools are never upgraded.
1633          */
1634         enum pool_mode old_mode = pool->pf.mode;
1635         enum pool_mode new_mode = pt->adjusted_pf.mode;
1636
1637         if (old_mode > new_mode)
1638                 new_mode = old_mode;
1639
1640         pool->ti = ti;
1641         pool->low_water_blocks = pt->low_water_blocks;
1642         pool->pf = pt->adjusted_pf;
1643
1644         set_pool_mode(pool, new_mode);
1645
1646         return 0;
1647 }
1648
1649 static void unbind_control_target(struct pool *pool, struct dm_target *ti)
1650 {
1651         if (pool->ti == ti)
1652                 pool->ti = NULL;
1653 }
1654
1655 /*----------------------------------------------------------------
1656  * Pool creation
1657  *--------------------------------------------------------------*/
1658 /* Initialize pool features. */
1659 static void pool_features_init(struct pool_features *pf)
1660 {
1661         pf->mode = PM_WRITE;
1662         pf->zero_new_blocks = true;
1663         pf->discard_enabled = true;
1664         pf->discard_passdown = true;
1665 }
1666
1667 static void __pool_destroy(struct pool *pool)
1668 {
1669         __pool_table_remove(pool);
1670
1671         if (dm_pool_metadata_close(pool->pmd) < 0)
1672                 DMWARN("%s: dm_pool_metadata_close() failed.", __func__);
1673
1674         dm_bio_prison_destroy(pool->prison);
1675         dm_kcopyd_client_destroy(pool->copier);
1676
1677         if (pool->wq)
1678                 destroy_workqueue(pool->wq);
1679
1680         if (pool->next_mapping)
1681                 mempool_free(pool->next_mapping, pool->mapping_pool);
1682         mempool_destroy(pool->mapping_pool);
1683         dm_deferred_set_destroy(pool->shared_read_ds);
1684         dm_deferred_set_destroy(pool->all_io_ds);
1685         kfree(pool);
1686 }
1687
1688 static struct kmem_cache *_new_mapping_cache;
1689
1690 static struct pool *pool_create(struct mapped_device *pool_md,
1691                                 struct block_device *metadata_dev,
1692                                 unsigned long block_size,
1693                                 int read_only, char **error)
1694 {
1695         int r;
1696         void *err_p;
1697         struct pool *pool;
1698         struct dm_pool_metadata *pmd;
1699         bool format_device = read_only ? false : true;
1700
1701         pmd = dm_pool_metadata_open(metadata_dev, block_size, format_device);
1702         if (IS_ERR(pmd)) {
1703                 *error = "Error creating metadata object";
1704                 return (struct pool *)pmd;
1705         }
1706
1707         pool = kmalloc(sizeof(*pool), GFP_KERNEL);
1708         if (!pool) {
1709                 *error = "Error allocating memory for pool";
1710                 err_p = ERR_PTR(-ENOMEM);
1711                 goto bad_pool;
1712         }
1713
1714         pool->pmd = pmd;
1715         pool->sectors_per_block = block_size;
1716         if (block_size & (block_size - 1))
1717                 pool->sectors_per_block_shift = -1;
1718         else
1719                 pool->sectors_per_block_shift = __ffs(block_size);
1720         pool->low_water_blocks = 0;
1721         pool_features_init(&pool->pf);
1722         pool->prison = dm_bio_prison_create(PRISON_CELLS);
1723         if (!pool->prison) {
1724                 *error = "Error creating pool's bio prison";
1725                 err_p = ERR_PTR(-ENOMEM);
1726                 goto bad_prison;
1727         }
1728
1729         pool->copier = dm_kcopyd_client_create(&dm_kcopyd_throttle);
1730         if (IS_ERR(pool->copier)) {
1731                 r = PTR_ERR(pool->copier);
1732                 *error = "Error creating pool's kcopyd client";
1733                 err_p = ERR_PTR(r);
1734                 goto bad_kcopyd_client;
1735         }
1736
1737         /*
1738          * Create a single-threaded workqueue that will service all devices
1739          * that use this metadata.
1740          */
1741         pool->wq = alloc_ordered_workqueue("dm-" DM_MSG_PREFIX, WQ_MEM_RECLAIM);
1742         if (!pool->wq) {
1743                 *error = "Error creating pool's workqueue";
1744                 err_p = ERR_PTR(-ENOMEM);
1745                 goto bad_wq;
1746         }
1747
1748         INIT_WORK(&pool->worker, do_worker);
1749         INIT_DELAYED_WORK(&pool->waker, do_waker);
1750         spin_lock_init(&pool->lock);
1751         bio_list_init(&pool->deferred_bios);
1752         bio_list_init(&pool->deferred_flush_bios);
1753         INIT_LIST_HEAD(&pool->prepared_mappings);
1754         INIT_LIST_HEAD(&pool->prepared_discards);
1755         pool->low_water_triggered = 0;
1756         pool->no_free_space = 0;
1757         bio_list_init(&pool->retry_on_resume_list);
1758
1759         pool->shared_read_ds = dm_deferred_set_create();
1760         if (!pool->shared_read_ds) {
1761                 *error = "Error creating pool's shared read deferred set";
1762                 err_p = ERR_PTR(-ENOMEM);
1763                 goto bad_shared_read_ds;
1764         }
1765
1766         pool->all_io_ds = dm_deferred_set_create();
1767         if (!pool->all_io_ds) {
1768                 *error = "Error creating pool's all io deferred set";
1769                 err_p = ERR_PTR(-ENOMEM);
1770                 goto bad_all_io_ds;
1771         }
1772
1773         pool->next_mapping = NULL;
1774         pool->mapping_pool = mempool_create_slab_pool(MAPPING_POOL_SIZE,
1775                                                       _new_mapping_cache);
1776         if (!pool->mapping_pool) {
1777                 *error = "Error creating pool's mapping mempool";
1778                 err_p = ERR_PTR(-ENOMEM);
1779                 goto bad_mapping_pool;
1780         }
1781
1782         pool->ref_count = 1;
1783         pool->last_commit_jiffies = jiffies;
1784         pool->pool_md = pool_md;
1785         pool->md_dev = metadata_dev;
1786         __pool_table_insert(pool);
1787
1788         return pool;
1789
1790 bad_mapping_pool:
1791         dm_deferred_set_destroy(pool->all_io_ds);
1792 bad_all_io_ds:
1793         dm_deferred_set_destroy(pool->shared_read_ds);
1794 bad_shared_read_ds:
1795         destroy_workqueue(pool->wq);
1796 bad_wq:
1797         dm_kcopyd_client_destroy(pool->copier);
1798 bad_kcopyd_client:
1799         dm_bio_prison_destroy(pool->prison);
1800 bad_prison:
1801         kfree(pool);
1802 bad_pool:
1803         if (dm_pool_metadata_close(pmd))
1804                 DMWARN("%s: dm_pool_metadata_close() failed.", __func__);
1805
1806         return err_p;
1807 }
1808
1809 static void __pool_inc(struct pool *pool)
1810 {
1811         BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));
1812         pool->ref_count++;
1813 }
1814
1815 static void __pool_dec(struct pool *pool)
1816 {
1817         BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));
1818         BUG_ON(!pool->ref_count);
1819         if (!--pool->ref_count)
1820                 __pool_destroy(pool);
1821 }
1822
1823 static struct pool *__pool_find(struct mapped_device *pool_md,
1824                                 struct block_device *metadata_dev,
1825                                 unsigned long block_size, int read_only,
1826                                 char **error, int *created)
1827 {
1828         struct pool *pool = __pool_table_lookup_metadata_dev(metadata_dev);
1829
1830         if (pool) {
1831                 if (pool->pool_md != pool_md) {
1832                         *error = "metadata device already in use by a pool";
1833                         return ERR_PTR(-EBUSY);
1834                 }
1835                 __pool_inc(pool);
1836
1837         } else {
1838                 pool = __pool_table_lookup(pool_md);
1839                 if (pool) {
1840                         if (pool->md_dev != metadata_dev) {
1841                                 *error = "different pool cannot replace a pool";
1842                                 return ERR_PTR(-EINVAL);
1843                         }
1844                         __pool_inc(pool);
1845
1846                 } else {
1847                         pool = pool_create(pool_md, metadata_dev, block_size, read_only, error);
1848                         *created = 1;
1849                 }
1850         }
1851
1852         return pool;
1853 }
1854
1855 /*----------------------------------------------------------------
1856  * Pool target methods
1857  *--------------------------------------------------------------*/
1858 static void pool_dtr(struct dm_target *ti)
1859 {
1860         struct pool_c *pt = ti->private;
1861
1862         mutex_lock(&dm_thin_pool_table.mutex);
1863
1864         unbind_control_target(pt->pool, ti);
1865         __pool_dec(pt->pool);
1866         dm_put_device(ti, pt->metadata_dev);
1867         dm_put_device(ti, pt->data_dev);
1868         kfree(pt);
1869
1870         mutex_unlock(&dm_thin_pool_table.mutex);
1871 }
1872
1873 static int parse_pool_features(struct dm_arg_set *as, struct pool_features *pf,
1874                                struct dm_target *ti)
1875 {
1876         int r;
1877         unsigned argc;
1878         const char *arg_name;
1879
1880         static struct dm_arg _args[] = {
1881                 {0, 3, "Invalid number of pool feature arguments"},
1882         };
1883
1884         /*
1885          * No feature arguments supplied.
1886          */
1887         if (!as->argc)
1888                 return 0;
1889
1890         r = dm_read_arg_group(_args, as, &argc, &ti->error);
1891         if (r)
1892                 return -EINVAL;
1893
1894         while (argc && !r) {
1895                 arg_name = dm_shift_arg(as);
1896                 argc--;
1897
1898                 if (!strcasecmp(arg_name, "skip_block_zeroing"))
1899                         pf->zero_new_blocks = false;
1900
1901                 else if (!strcasecmp(arg_name, "ignore_discard"))
1902                         pf->discard_enabled = false;
1903
1904                 else if (!strcasecmp(arg_name, "no_discard_passdown"))
1905                         pf->discard_passdown = false;
1906
1907                 else if (!strcasecmp(arg_name, "read_only"))
1908                         pf->mode = PM_READ_ONLY;
1909
1910                 else {
1911                         ti->error = "Unrecognised pool feature requested";
1912                         r = -EINVAL;
1913                         break;
1914                 }
1915         }
1916
1917         return r;
1918 }
1919
1920 static void metadata_low_callback(void *context)
1921 {
1922         struct pool *pool = context;
1923
1924         DMWARN("%s: reached low water mark for metadata device: sending event.",
1925                dm_device_name(pool->pool_md));
1926
1927         dm_table_event(pool->ti->table);
1928 }
1929
1930 static sector_t get_metadata_dev_size(struct block_device *bdev)
1931 {
1932         sector_t metadata_dev_size = i_size_read(bdev->bd_inode) >> SECTOR_SHIFT;
1933         char buffer[BDEVNAME_SIZE];
1934
1935         if (metadata_dev_size > THIN_METADATA_MAX_SECTORS_WARNING) {
1936                 DMWARN("Metadata device %s is larger than %u sectors: excess space will not be used.",
1937                        bdevname(bdev, buffer), THIN_METADATA_MAX_SECTORS);
1938                 metadata_dev_size = THIN_METADATA_MAX_SECTORS_WARNING;
1939         }
1940
1941         return metadata_dev_size;
1942 }
1943
1944 static dm_block_t get_metadata_dev_size_in_blocks(struct block_device *bdev)
1945 {
1946         sector_t metadata_dev_size = get_metadata_dev_size(bdev);
1947
1948         sector_div(metadata_dev_size, THIN_METADATA_BLOCK_SIZE >> SECTOR_SHIFT);
1949
1950         return metadata_dev_size;
1951 }
1952
1953 /*
1954  * When a metadata threshold is crossed a dm event is triggered, and
1955  * userland should respond by growing the metadata device.  We could let
1956  * userland set the threshold, like we do with the data threshold, but I'm
1957  * not sure they know enough to do this well.
1958  */
1959 static dm_block_t calc_metadata_threshold(struct pool_c *pt)
1960 {
1961         /*
1962          * 4M is ample for all ops, with the possible exception of thin
1963          * device deletion, which is harmless if it fails (just retry the
1964          * delete after you've grown the device).
1965          */
1966         dm_block_t quarter = get_metadata_dev_size_in_blocks(pt->metadata_dev->bdev) / 4;
1967         return min((dm_block_t)1024ULL /* 4M */, quarter);
1968 }
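/*
 * A worked example (illustrative only, assuming the usual 4KiB metadata
 * block size): a 2GiB metadata device holds 524288 blocks, so the quarter
 * is 131072 blocks and the threshold becomes min(1024, 131072) = 1024
 * blocks, i.e. 4M.
 */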
1969
1970 /*
1971  * thin-pool <metadata dev> <data dev>
1972  *           <data block size (sectors)>
1973  *           <low water mark (blocks)>
1974  *           [<#feature args> [<arg>]*]
1975  *
1976  * Optional feature arguments are:
1977  *           skip_block_zeroing: skips the zeroing of newly-provisioned blocks.
1978  *           ignore_discard: disable discard
1979  *           no_discard_passdown: don't pass discards down to the data device
1980  */
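/*
 * An illustrative table line (device names and sizes are hypothetical):
 *
 *   0 20971520 thin-pool /dev/mapper/meta /dev/mapper/data 128 32768 1 skip_block_zeroing
 *
 * i.e. a 10GiB pool built from 64KiB (128 sector) data blocks, with a low
 * water mark of 32768 blocks and block zeroing skipped.
 */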
1981 static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv)
1982 {
1983         int r, pool_created = 0;
1984         struct pool_c *pt;
1985         struct pool *pool;
1986         struct pool_features pf;
1987         struct dm_arg_set as;
1988         struct dm_dev *data_dev;
1989         unsigned long block_size;
1990         dm_block_t low_water_blocks;
1991         struct dm_dev *metadata_dev;
1992         fmode_t metadata_mode;
1993
1994         /*
1995          * FIXME Remove validation from scope of lock.
1996          */
1997         mutex_lock(&dm_thin_pool_table.mutex);
1998
1999         if (argc < 4) {
2000                 ti->error = "Invalid argument count";
2001                 r = -EINVAL;
2002                 goto out_unlock;
2003         }
2004
2005         as.argc = argc;
2006         as.argv = argv;
2007
2008         /*
2009          * Set default pool features.
2010          */
2011         pool_features_init(&pf);
2012
2013         dm_consume_args(&as, 4);
2014         r = parse_pool_features(&as, &pf, ti);
2015         if (r)
2016                 goto out_unlock;
2017
2018         metadata_mode = FMODE_READ | ((pf.mode == PM_READ_ONLY) ? 0 : FMODE_WRITE);
2019         r = dm_get_device(ti, argv[0], metadata_mode, &metadata_dev);
2020         if (r) {
2021                 ti->error = "Error opening metadata block device";
2022                 goto out_unlock;
2023         }
2024
2025         /*
2026          * Run for the side-effect of possibly issuing a warning if the
2027          * device is too big.
2028          */
2029         (void) get_metadata_dev_size(metadata_dev->bdev);
2030
2031         r = dm_get_device(ti, argv[1], FMODE_READ | FMODE_WRITE, &data_dev);
2032         if (r) {
2033                 ti->error = "Error getting data device";
2034                 goto out_metadata;
2035         }
2036
2037         if (kstrtoul(argv[2], 10, &block_size) || !block_size ||
2038             block_size < DATA_DEV_BLOCK_SIZE_MIN_SECTORS ||
2039             block_size > DATA_DEV_BLOCK_SIZE_MAX_SECTORS ||
2040             block_size & (DATA_DEV_BLOCK_SIZE_MIN_SECTORS - 1)) {
2041                 ti->error = "Invalid block size";
2042                 r = -EINVAL;
2043                 goto out;
2044         }
2045
2046         if (kstrtoull(argv[3], 10, (unsigned long long *)&low_water_blocks)) {
2047                 ti->error = "Invalid low water mark";
2048                 r = -EINVAL;
2049                 goto out;
2050         }
2051
2052         pt = kzalloc(sizeof(*pt), GFP_KERNEL);
2053         if (!pt) {
2054                 r = -ENOMEM;
2055                 goto out;
2056         }
2057
2058         pool = __pool_find(dm_table_get_md(ti->table), metadata_dev->bdev,
2059                            block_size, pf.mode == PM_READ_ONLY, &ti->error, &pool_created);
2060         if (IS_ERR(pool)) {
2061                 r = PTR_ERR(pool);
2062                 goto out_free_pt;
2063         }
2064
2065         /*
2066          * 'pool_created' reflects whether this is the first table load.
2067          * Top level discard support is not allowed to be changed after
2068          * initial load.  This would require a pool reload to trigger thin
2069          * device changes.
2070          */
2071         if (!pool_created && pf.discard_enabled != pool->pf.discard_enabled) {
2072                 ti->error = "Discard support cannot be disabled once enabled";
2073                 r = -EINVAL;
2074                 goto out_flags_changed;
2075         }
2076
2077         pt->pool = pool;
2078         pt->ti = ti;
2079         pt->metadata_dev = metadata_dev;
2080         pt->data_dev = data_dev;
2081         pt->low_water_blocks = low_water_blocks;
2082         pt->adjusted_pf = pt->requested_pf = pf;
2083         ti->num_flush_bios = 1;
2084
2085         /*
2086          * Only need to enable discards if the pool should pass
2087          * them down to the data device.  The thin device's discard
2088          * processing will cause mappings to be removed from the btree.
2089          */
2090         if (pf.discard_enabled && pf.discard_passdown) {
2091                 ti->num_discard_bios = 1;
2092
2093                 /*
2094                  * Setting 'discards_supported' circumvents the normal
2095                  * stacking of discard limits (this keeps the pool and
2096                  * thin devices' discard limits consistent).
2097                  */
2098                 ti->discards_supported = true;
2099                 ti->discard_zeroes_data_unsupported = true;
2100         }
2101         ti->private = pt;
2102
2103         r = dm_pool_register_metadata_threshold(pt->pool->pmd,
2104                                                 calc_metadata_threshold(pt),
2105                                                 metadata_low_callback,
2106                                                 pool);
2107         if (r)
2108                 goto out_flags_changed;
2109
2110         pt->callbacks.congested_fn = pool_is_congested;
2111         dm_table_add_target_callbacks(ti->table, &pt->callbacks);
2112
2113         mutex_unlock(&dm_thin_pool_table.mutex);
2114
2115         return 0;
2116
2117 out_flags_changed:
2118         __pool_dec(pool);
2119 out_free_pt:
2120         kfree(pt);
2121 out:
2122         dm_put_device(ti, data_dev);
2123 out_metadata:
2124         dm_put_device(ti, metadata_dev);
2125 out_unlock:
2126         mutex_unlock(&dm_thin_pool_table.mutex);
2127
2128         return r;
2129 }
2130
2131 static int pool_map(struct dm_target *ti, struct bio *bio)
2132 {
2133         int r;
2134         struct pool_c *pt = ti->private;
2135         struct pool *pool = pt->pool;
2136         unsigned long flags;
2137
2138         /*
2139          * As this is a singleton target, ti->begin is always zero.
2140          */
2141         spin_lock_irqsave(&pool->lock, flags);
2142         bio->bi_bdev = pt->data_dev->bdev;
2143         r = DM_MAPIO_REMAPPED;
2144         spin_unlock_irqrestore(&pool->lock, flags);
2145
2146         return r;
2147 }
2148
2149 static int maybe_resize_data_dev(struct dm_target *ti, bool *need_commit)
2150 {
2151         int r;
2152         struct pool_c *pt = ti->private;
2153         struct pool *pool = pt->pool;
2154         sector_t data_size = ti->len;
2155         dm_block_t sb_data_size;
2156
2157         *need_commit = false;
2158
2159         (void) sector_div(data_size, pool->sectors_per_block);
2160
2161         r = dm_pool_get_data_dev_size(pool->pmd, &sb_data_size);
2162         if (r) {
2163                 DMERR("%s: failed to retrieve data device size",
2164                       dm_device_name(pool->pool_md));
2165                 return r;
2166         }
2167
2168         if (data_size < sb_data_size) {
2169                 DMERR("%s: pool target (%llu blocks) too small: expected %llu",
2170                       dm_device_name(pool->pool_md),
2171                       (unsigned long long)data_size, sb_data_size);
2172                 return -EINVAL;
2173
2174         } else if (data_size > sb_data_size) {
2175                 r = dm_pool_resize_data_dev(pool->pmd, data_size);
2176                 if (r) {
2177                         DMERR("%s: failed to resize data device",
2178                               dm_device_name(pool->pool_md));
2179                         set_pool_mode(pool, PM_READ_ONLY);
2180                         return r;
2181                 }
2182
2183                 *need_commit = true;
2184         }
2185
2186         return 0;
2187 }
2188
2189 static int maybe_resize_metadata_dev(struct dm_target *ti, bool *need_commit)
2190 {
2191         int r;
2192         struct pool_c *pt = ti->private;
2193         struct pool *pool = pt->pool;
2194         dm_block_t metadata_dev_size, sb_metadata_dev_size;
2195
2196         *need_commit = false;
2197
2198         metadata_dev_size = get_metadata_dev_size_in_blocks(pool->md_dev);
2199
2200         r = dm_pool_get_metadata_dev_size(pool->pmd, &sb_metadata_dev_size);
2201         if (r) {
2202                 DMERR("%s: failed to retrieve metadata device size",
2203                       dm_device_name(pool->pool_md));
2204                 return r;
2205         }
2206
2207         if (metadata_dev_size < sb_metadata_dev_size) {
2208                 DMERR("%s: metadata device (%llu blocks) too small: expected %llu",
2209                       dm_device_name(pool->pool_md),
2210                       metadata_dev_size, sb_metadata_dev_size);
2211                 return -EINVAL;
2212
2213         } else if (metadata_dev_size > sb_metadata_dev_size) {
2214                 r = dm_pool_resize_metadata_dev(pool->pmd, metadata_dev_size);
2215                 if (r) {
2216                         DMERR("%s: failed to resize metadata device",
2217                               dm_device_name(pool->pool_md));
2218                         return r;
2219                 }
2220
2221                 *need_commit = true;
2222         }
2223
2224         return 0;
2225 }
2226
2227 /*
2228  * Retrieves the number of blocks of the data device from
2229  * the superblock and compares it to the actual device size,
2230  * thus resizing the data device in case it has grown.
2231  *
2232  * This copes both with opening preallocated data devices in the ctr
2233  * being followed by a resume
2234  * -and-
2235  * with calling the resume method individually after userspace has
2236  * grown the data device in reaction to a table event.
2237  */
2238 static int pool_preresume(struct dm_target *ti)
2239 {
2240         int r;
2241         bool need_commit1, need_commit2;
2242         struct pool_c *pt = ti->private;
2243         struct pool *pool = pt->pool;
2244
2245         /*
2246          * Take control of the pool object.
2247          */
2248         r = bind_control_target(pool, ti);
2249         if (r)
2250                 return r;
2251
2252         r = maybe_resize_data_dev(ti, &need_commit1);
2253         if (r)
2254                 return r;
2255
2256         r = maybe_resize_metadata_dev(ti, &need_commit2);
2257         if (r)
2258                 return r;
2259
2260         if (need_commit1 || need_commit2)
2261                 (void) commit_or_fallback(pool);
2262
2263         return 0;
2264 }
2265
2266 static void pool_resume(struct dm_target *ti)
2267 {
2268         struct pool_c *pt = ti->private;
2269         struct pool *pool = pt->pool;
2270         unsigned long flags;
2271
2272         spin_lock_irqsave(&pool->lock, flags);
2273         pool->low_water_triggered = 0;
2274         pool->no_free_space = 0;
2275         __requeue_bios(pool);
2276         spin_unlock_irqrestore(&pool->lock, flags);
2277
2278         do_waker(&pool->waker.work);
2279 }
2280
2281 static void pool_postsuspend(struct dm_target *ti)
2282 {
2283         struct pool_c *pt = ti->private;
2284         struct pool *pool = pt->pool;
2285
2286         cancel_delayed_work(&pool->waker);
2287         flush_workqueue(pool->wq);
2288         (void) commit_or_fallback(pool);
2289 }
2290
2291 static int check_arg_count(unsigned argc, unsigned args_required)
2292 {
2293         if (argc != args_required) {
2294                 DMWARN("Message received with %u arguments instead of %u.",
2295                        argc, args_required);
2296                 return -EINVAL;
2297         }
2298
2299         return 0;
2300 }
2301
2302 static int read_dev_id(char *arg, dm_thin_id *dev_id, int warning)
2303 {
2304         if (!kstrtoull(arg, 10, (unsigned long long *)dev_id) &&
2305             *dev_id <= MAX_DEV_ID)
2306                 return 0;
2307
2308         if (warning)
2309                 DMWARN("Message received with invalid device id: %s", arg);
2310
2311         return -EINVAL;
2312 }
2313
2314 static int process_create_thin_mesg(unsigned argc, char **argv, struct pool *pool)
2315 {
2316         dm_thin_id dev_id;
2317         int r;
2318
2319         r = check_arg_count(argc, 2);
2320         if (r)
2321                 return r;
2322
2323         r = read_dev_id(argv[1], &dev_id, 1);
2324         if (r)
2325                 return r;
2326
2327         r = dm_pool_create_thin(pool->pmd, dev_id);
2328         if (r) {
2329                 DMWARN("Creation of new thinly-provisioned device with id %s failed.",
2330                        argv[1]);
2331                 return r;
2332         }
2333
2334         return 0;
2335 }
2336
2337 static int process_create_snap_mesg(unsigned argc, char **argv, struct pool *pool)
2338 {
2339         dm_thin_id dev_id;
2340         dm_thin_id origin_dev_id;
2341         int r;
2342
2343         r = check_arg_count(argc, 3);
2344         if (r)
2345                 return r;
2346
2347         r = read_dev_id(argv[1], &dev_id, 1);
2348         if (r)
2349                 return r;
2350
2351         r = read_dev_id(argv[2], &origin_dev_id, 1);
2352         if (r)
2353                 return r;
2354
2355         r = dm_pool_create_snap(pool->pmd, dev_id, origin_dev_id);
2356         if (r) {
2357                 DMWARN("Creation of new snapshot %s of device %s failed.",
2358                        argv[1], argv[2]);
2359                 return r;
2360         }
2361
2362         return 0;
2363 }
2364
2365 static int process_delete_mesg(unsigned argc, char **argv, struct pool *pool)
2366 {
2367         dm_thin_id dev_id;
2368         int r;
2369
2370         r = check_arg_count(argc, 2);
2371         if (r)
2372                 return r;
2373
2374         r = read_dev_id(argv[1], &dev_id, 1);
2375         if (r)
2376                 return r;
2377
2378         r = dm_pool_delete_thin_device(pool->pmd, dev_id);
2379         if (r)
2380                 DMWARN("Deletion of thin device %s failed.", argv[1]);
2381
2382         return r;
2383 }
2384
2385 static int process_set_transaction_id_mesg(unsigned argc, char **argv, struct pool *pool)
2386 {
2387         dm_thin_id old_id, new_id;
2388         int r;
2389
2390         r = check_arg_count(argc, 3);
2391         if (r)
2392                 return r;
2393
2394         if (kstrtoull(argv[1], 10, (unsigned long long *)&old_id)) {
2395                 DMWARN("set_transaction_id message: Unrecognised id %s.", argv[1]);
2396                 return -EINVAL;
2397         }
2398
2399         if (kstrtoull(argv[2], 10, (unsigned long long *)&new_id)) {
2400                 DMWARN("set_transaction_id message: Unrecognised new id %s.", argv[2]);
2401                 return -EINVAL;
2402         }
2403
2404         r = dm_pool_set_metadata_transaction_id(pool->pmd, old_id, new_id);
2405         if (r) {
2406                 DMWARN("Failed to change transaction id from %s to %s.",
2407                        argv[1], argv[2]);
2408                 return r;
2409         }
2410
2411         return 0;
2412 }
2413
2414 static int process_reserve_metadata_snap_mesg(unsigned argc, char **argv, struct pool *pool)
2415 {
2416         int r;
2417
2418         r = check_arg_count(argc, 1);
2419         if (r)
2420                 return r;
2421
2422         (void) commit_or_fallback(pool);
2423
2424         r = dm_pool_reserve_metadata_snap(pool->pmd);
2425         if (r)
2426                 DMWARN("reserve_metadata_snap message failed.");
2427
2428         return r;
2429 }
2430
2431 static int process_release_metadata_snap_mesg(unsigned argc, char **argv, struct pool *pool)
2432 {
2433         int r;
2434
2435         r = check_arg_count(argc, 1);
2436         if (r)
2437                 return r;
2438
2439         r = dm_pool_release_metadata_snap(pool->pmd);
2440         if (r)
2441                 DMWARN("release_metadata_snap message failed.");
2442
2443         return r;
2444 }
2445
2446 /*
2447  * Messages supported:
2448  *   create_thin        <dev_id>
2449  *   create_snap        <dev_id> <origin_id>
2450  *   delete             <dev_id>
2452  *   set_transaction_id <current_trans_id> <new_trans_id>
2453  *   reserve_metadata_snap
2454  *   release_metadata_snap
2455  */
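/*
 * Typical usage from userspace, e.g. via dmsetup (the pool device name is
 * hypothetical):
 *
 *   dmsetup message /dev/mapper/pool 0 "create_thin 0"
 *   dmsetup message /dev/mapper/pool 0 "create_snap 1 0"
 *   dmsetup message /dev/mapper/pool 0 "delete 1"
 */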
2456 static int pool_message(struct dm_target *ti, unsigned argc, char **argv)
2457 {
2458         int r = -EINVAL;
2459         struct pool_c *pt = ti->private;
2460         struct pool *pool = pt->pool;
2461
2462         if (!strcasecmp(argv[0], "create_thin"))
2463                 r = process_create_thin_mesg(argc, argv, pool);
2464
2465         else if (!strcasecmp(argv[0], "create_snap"))
2466                 r = process_create_snap_mesg(argc, argv, pool);
2467
2468         else if (!strcasecmp(argv[0], "delete"))
2469                 r = process_delete_mesg(argc, argv, pool);
2470
2471         else if (!strcasecmp(argv[0], "set_transaction_id"))
2472                 r = process_set_transaction_id_mesg(argc, argv, pool);
2473
2474         else if (!strcasecmp(argv[0], "reserve_metadata_snap"))
2475                 r = process_reserve_metadata_snap_mesg(argc, argv, pool);
2476
2477         else if (!strcasecmp(argv[0], "release_metadata_snap"))
2478                 r = process_release_metadata_snap_mesg(argc, argv, pool);
2479
2480         else
2481                 DMWARN("Unrecognised thin pool target message received: %s", argv[0]);
2482
2483         if (!r)
2484                 (void) commit_or_fallback(pool);
2485
2486         return r;
2487 }
2488
2489 static void emit_flags(struct pool_features *pf, char *result,
2490                        unsigned sz, unsigned maxlen)
2491 {
2492         unsigned count = !pf->zero_new_blocks + !pf->discard_enabled +
2493                 !pf->discard_passdown + (pf->mode == PM_READ_ONLY);
2494         DMEMIT("%u ", count);
2495
2496         if (!pf->zero_new_blocks)
2497                 DMEMIT("skip_block_zeroing ");
2498
2499         if (!pf->discard_enabled)
2500                 DMEMIT("ignore_discard ");
2501
2502         if (!pf->discard_passdown)
2503                 DMEMIT("no_discard_passdown ");
2504
2505         if (pf->mode == PM_READ_ONLY)
2506                 DMEMIT("read_only ");
2507 }
2508
2509 /*
2510  * Status line is:
2511  *    <transaction id> <used metadata blocks>/<total metadata blocks>
2512  *    <used data blocks>/<total data blocks> <held metadata root>
2513  */
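/*
 * A hypothetical example of the resulting STATUSTYPE_INFO line:
 *
 *   2 78/4161600 2048/524288 - rw discard_passdown
 *
 * i.e. transaction id 2, 78 of 4161600 metadata blocks used, 2048 of
 * 524288 data blocks used, no held metadata root, pool is read-write and
 * discards are passed down to the data device.
 */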
2514 static void pool_status(struct dm_target *ti, status_type_t type,
2515                         unsigned status_flags, char *result, unsigned maxlen)
2516 {
2517         int r;
2518         unsigned sz = 0;
2519         uint64_t transaction_id;
2520         dm_block_t nr_free_blocks_data;
2521         dm_block_t nr_free_blocks_metadata;
2522         dm_block_t nr_blocks_data;
2523         dm_block_t nr_blocks_metadata;
2524         dm_block_t held_root;
2525         char buf[BDEVNAME_SIZE];
2526         char buf2[BDEVNAME_SIZE];
2527         struct pool_c *pt = ti->private;
2528         struct pool *pool = pt->pool;
2529
2530         switch (type) {
2531         case STATUSTYPE_INFO:
2532                 if (get_pool_mode(pool) == PM_FAIL) {
2533                         DMEMIT("Fail");
2534                         break;
2535                 }
2536
2537                 /* Commit to ensure statistics aren't out-of-date */
2538                 if (!(status_flags & DM_STATUS_NOFLUSH_FLAG) && !dm_suspended(ti))
2539                         (void) commit_or_fallback(pool);
2540
2541                 r = dm_pool_get_metadata_transaction_id(pool->pmd, &transaction_id);
2542                 if (r) {
2543                         DMERR("%s: dm_pool_get_metadata_transaction_id returned %d",
2544                               dm_device_name(pool->pool_md), r);
2545                         goto err;
2546                 }
2547
2548                 r = dm_pool_get_free_metadata_block_count(pool->pmd, &nr_free_blocks_metadata);
2549                 if (r) {
2550                         DMERR("%s: dm_pool_get_free_metadata_block_count returned %d",
2551                               dm_device_name(pool->pool_md), r);
2552                         goto err;
2553                 }
2554
2555                 r = dm_pool_get_metadata_dev_size(pool->pmd, &nr_blocks_metadata);
2556                 if (r) {
2557                         DMERR("%s: dm_pool_get_metadata_dev_size returned %d",
2558                               dm_device_name(pool->pool_md), r);
2559                         goto err;
2560                 }
2561
2562                 r = dm_pool_get_free_block_count(pool->pmd, &nr_free_blocks_data);
2563                 if (r) {
2564                         DMERR("%s: dm_pool_get_free_block_count returned %d",
2565                               dm_device_name(pool->pool_md), r);
2566                         goto err;
2567                 }
2568
2569                 r = dm_pool_get_data_dev_size(pool->pmd, &nr_blocks_data);
2570                 if (r) {
2571                         DMERR("%s: dm_pool_get_data_dev_size returned %d",
2572                               dm_device_name(pool->pool_md), r);
2573                         goto err;
2574                 }
2575
2576                 r = dm_pool_get_metadata_snap(pool->pmd, &held_root);
2577                 if (r) {
2578                         DMERR("%s: dm_pool_get_metadata_snap returned %d",
2579                               dm_device_name(pool->pool_md), r);
2580                         goto err;
2581                 }
2582
2583                 DMEMIT("%llu %llu/%llu %llu/%llu ",
2584                        (unsigned long long)transaction_id,
2585                        (unsigned long long)(nr_blocks_metadata - nr_free_blocks_metadata),
2586                        (unsigned long long)nr_blocks_metadata,
2587                        (unsigned long long)(nr_blocks_data - nr_free_blocks_data),
2588                        (unsigned long long)nr_blocks_data);
2589
2590                 if (held_root)
2591                         DMEMIT("%llu ", held_root);
2592                 else
2593                         DMEMIT("- ");
2594
2595                 if (pool->pf.mode == PM_READ_ONLY)
2596                         DMEMIT("ro ");
2597                 else
2598                         DMEMIT("rw ");
2599
2600                 if (!pool->pf.discard_enabled)
2601                         DMEMIT("ignore_discard");
2602                 else if (pool->pf.discard_passdown)
2603                         DMEMIT("discard_passdown");
2604                 else
2605                         DMEMIT("no_discard_passdown");
2606
2607                 break;
2608
2609         case STATUSTYPE_TABLE:
2610                 DMEMIT("%s %s %lu %llu ",
2611                        format_dev_t(buf, pt->metadata_dev->bdev->bd_dev),
2612                        format_dev_t(buf2, pt->data_dev->bdev->bd_dev),
2613                        (unsigned long)pool->sectors_per_block,
2614                        (unsigned long long)pt->low_water_blocks);
2615                 emit_flags(&pt->requested_pf, result, sz, maxlen);
2616                 break;
2617         }
2618         return;
2619
2620 err:
2621         DMEMIT("Error");
2622 }
2623
2624 static int pool_iterate_devices(struct dm_target *ti,
2625                                 iterate_devices_callout_fn fn, void *data)
2626 {
2627         struct pool_c *pt = ti->private;
2628
2629         return fn(ti, pt->data_dev, 0, ti->len, data);
2630 }
2631
2632 static int pool_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
2633                       struct bio_vec *biovec, int max_size)
2634 {
2635         struct pool_c *pt = ti->private;
2636         struct request_queue *q = bdev_get_queue(pt->data_dev->bdev);
2637
2638         if (!q->merge_bvec_fn)
2639                 return max_size;
2640
2641         bvm->bi_bdev = pt->data_dev->bdev;
2642
2643         return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
2644 }
2645
2646 static void set_discard_limits(struct pool_c *pt, struct queue_limits *limits)
2647 {
2648         struct pool *pool = pt->pool;
2649         struct queue_limits *data_limits;
2650
2651         limits->max_discard_sectors = pool->sectors_per_block;
2652
2653         /*
2654          * discard_granularity is just a hint, and not enforced.
2655          */
2656         if (pt->adjusted_pf.discard_passdown) {
2657                 data_limits = &bdev_get_queue(pt->data_dev->bdev)->limits;
2658                 limits->discard_granularity = data_limits->discard_granularity;
2659         } else
2660                 limits->discard_granularity = pool->sectors_per_block << SECTOR_SHIFT;
2661 }
2662
2663 static void pool_io_hints(struct dm_target *ti, struct queue_limits *limits)
2664 {
2665         struct pool_c *pt = ti->private;
2666         struct pool *pool = pt->pool;
2667         uint64_t io_opt_sectors = limits->io_opt >> SECTOR_SHIFT;
2668
2669         /*
2670          * If the system-determined stacked limits are compatible with the
2671          * pool's blocksize (io_opt is a factor), do not override them.
2672          */
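        /*
         * For example (illustrative): with 128 sector (64KiB) blocks, an
         * io_opt of 512 sectors is a whole number of blocks and is left
         * alone, whereas an io_opt of 192 sectors would be overridden.
         */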
2673         if (io_opt_sectors < pool->sectors_per_block ||
2674             do_div(io_opt_sectors, pool->sectors_per_block)) {
2675                 blk_limits_io_min(limits, 0);
2676                 blk_limits_io_opt(limits, pool->sectors_per_block << SECTOR_SHIFT);
2677         }
2678
2679         /*
2680          * pt->adjusted_pf is a staging area for the actual features to use.
2681          * They get transferred to the live pool in bind_control_target()
2682          * called from pool_preresume().
2683          */
2684         if (!pt->adjusted_pf.discard_enabled)
2685                 return;
2686
2687         disable_passdown_if_not_supported(pt);
2688
2689         set_discard_limits(pt, limits);
2690 }
2691
2692 static struct target_type pool_target = {
2693         .name = "thin-pool",
2694         .features = DM_TARGET_SINGLETON | DM_TARGET_ALWAYS_WRITEABLE |
2695                     DM_TARGET_IMMUTABLE,
2696         .version = {1, 8, 0},
2697         .module = THIS_MODULE,
2698         .ctr = pool_ctr,
2699         .dtr = pool_dtr,
2700         .map = pool_map,
2701         .postsuspend = pool_postsuspend,
2702         .preresume = pool_preresume,
2703         .resume = pool_resume,
2704         .message = pool_message,
2705         .status = pool_status,
2706         .merge = pool_merge,
2707         .iterate_devices = pool_iterate_devices,
2708         .io_hints = pool_io_hints,
2709 };
2710
2711 /*----------------------------------------------------------------
2712  * Thin target methods
2713  *--------------------------------------------------------------*/
2714 static void thin_dtr(struct dm_target *ti)
2715 {
2716         struct thin_c *tc = ti->private;
2717
2718         mutex_lock(&dm_thin_pool_table.mutex);
2719
2720         __pool_dec(tc->pool);
2721         dm_pool_close_thin_device(tc->td);
2722         dm_put_device(ti, tc->pool_dev);
2723         if (tc->origin_dev)
2724                 dm_put_device(ti, tc->origin_dev);
2725         kfree(tc);
2726
2727         mutex_unlock(&dm_thin_pool_table.mutex);
2728 }
2729
2730 /*
2731  * Thin target parameters:
2732  *
2733  * <pool_dev> <dev_id> [origin_dev]
2734  *
2735  * pool_dev: the path to the pool (eg, /dev/mapper/my_pool)
2736  * dev_id: the internal device identifier
2737  * origin_dev: a device external to the pool that should act as the origin
2738  *
2739  * If the pool device has discards disabled, they get disabled for the thin
2740  * device as well.
2741  */
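/*
 * An illustrative table line (names and sizes are hypothetical):
 *
 *   0 2097152 thin /dev/mapper/pool 0
 *
 * presents thin device id 0 from the pool as a 1GiB device.
 */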
2742 static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv)
2743 {
2744         int r;
2745         struct thin_c *tc;
2746         struct dm_dev *pool_dev, *origin_dev;
2747         struct mapped_device *pool_md;
2748
2749         mutex_lock(&dm_thin_pool_table.mutex);
2750
2751         if (argc != 2 && argc != 3) {
2752                 ti->error = "Invalid argument count";
2753                 r = -EINVAL;
2754                 goto out_unlock;
2755         }
2756
2757         tc = ti->private = kzalloc(sizeof(*tc), GFP_KERNEL);
2758         if (!tc) {
2759                 ti->error = "Out of memory";
2760                 r = -ENOMEM;
2761                 goto out_unlock;
2762         }
2763
2764         if (argc == 3) {
2765                 r = dm_get_device(ti, argv[2], FMODE_READ, &origin_dev);
2766                 if (r) {
2767                         ti->error = "Error opening origin device";
2768                         goto bad_origin_dev;
2769                 }
2770                 tc->origin_dev = origin_dev;
2771         }
2772
2773         r = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &pool_dev);
2774         if (r) {
2775                 ti->error = "Error opening pool device";
2776                 goto bad_pool_dev;
2777         }
2778         tc->pool_dev = pool_dev;
2779
2780         if (read_dev_id(argv[1], (unsigned long long *)&tc->dev_id, 0)) {
2781                 ti->error = "Invalid device id";
2782                 r = -EINVAL;
2783                 goto bad_common;
2784         }
2785
2786         pool_md = dm_get_md(tc->pool_dev->bdev->bd_dev);
2787         if (!pool_md) {
2788                 ti->error = "Couldn't get pool mapped device";
2789                 r = -EINVAL;
2790                 goto bad_common;
2791         }
2792
2793         tc->pool = __pool_table_lookup(pool_md);
2794         if (!tc->pool) {
2795                 ti->error = "Couldn't find pool object";
2796                 r = -EINVAL;
2797                 goto bad_pool_lookup;
2798         }
2799         __pool_inc(tc->pool);
2800
2801         if (get_pool_mode(tc->pool) == PM_FAIL) {
2802                 ti->error = "Couldn't open thin device, Pool is in fail mode";
                r = -EINVAL;
2803                 goto bad_thin_open;
2804         }
2805
2806         r = dm_pool_open_thin_device(tc->pool->pmd, tc->dev_id, &tc->td);
2807         if (r) {
2808                 ti->error = "Couldn't open thin internal device";
2809                 goto bad_thin_open;
2810         }
2811
2812         r = dm_set_target_max_io_len(ti, tc->pool->sectors_per_block);
2813         if (r)
2814                 goto bad_target_max_io_len;
2815
2816         ti->num_flush_bios = 1;
2817         ti->flush_supported = true;
2818         ti->per_bio_data_size = sizeof(struct dm_thin_endio_hook);
2819
2820         /* In case the pool supports discards, pass them on. */
2821         if (tc->pool->pf.discard_enabled) {
2822                 ti->discards_supported = true;
2823                 ti->num_discard_bios = 1;
2824                 ti->discard_zeroes_data_unsupported = true;
2825                 /* Discard bios must be split on a block boundary */
2826                 ti->split_discard_bios = true;
2827         }
2828
2829         dm_put(pool_md);
2830
2831         mutex_unlock(&dm_thin_pool_table.mutex);
2832
2833         return 0;
2834
bad_target_max_io_len:
        dm_pool_close_thin_device(tc->td);
2835 bad_thin_open:
2836         __pool_dec(tc->pool);
2837 bad_pool_lookup:
2838         dm_put(pool_md);
2839 bad_common:
2840         dm_put_device(ti, tc->pool_dev);
2841 bad_pool_dev:
2842         if (tc->origin_dev)
2843                 dm_put_device(ti, tc->origin_dev);
2844 bad_origin_dev:
2845         kfree(tc);
2846 out_unlock:
2847         mutex_unlock(&dm_thin_pool_table.mutex);
2848
2849         return r;
2850 }
2851
2852 static int thin_map(struct dm_target *ti, struct bio *bio)
2853 {
2854         bio->bi_sector = dm_target_offset(ti, bio->bi_sector);
2855
2856         return thin_bio_map(ti, bio);
2857 }
2858
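/*
 * Release any deferred-set entries taken while mapping this bio.  Mappings
 * or discards that were waiting for it to complete are passed on to the
 * worker.
 */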
2859 static int thin_endio(struct dm_target *ti, struct bio *bio, int err)
2860 {
2861         unsigned long flags;
2862         struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
2863         struct list_head work;
2864         struct dm_thin_new_mapping *m, *tmp;
2865         struct pool *pool = h->tc->pool;
2866
2867         if (h->shared_read_entry) {
2868                 INIT_LIST_HEAD(&work);
2869                 dm_deferred_entry_dec(h->shared_read_entry, &work);
2870
2871                 spin_lock_irqsave(&pool->lock, flags);
2872                 list_for_each_entry_safe(m, tmp, &work, list) {
2873                         list_del(&m->list);
2874                         m->quiesced = 1;
2875                         __maybe_add_mapping(m);
2876                 }
2877                 spin_unlock_irqrestore(&pool->lock, flags);
2878         }
2879
2880         if (h->all_io_entry) {
2881                 INIT_LIST_HEAD(&work);
2882                 dm_deferred_entry_dec(h->all_io_entry, &work);
2883                 if (!list_empty(&work)) {
2884                         spin_lock_irqsave(&pool->lock, flags);
2885                         list_for_each_entry_safe(m, tmp, &work, list)
2886                                 list_add(&m->list, &pool->prepared_discards);
2887                         spin_unlock_irqrestore(&pool->lock, flags);
2888                         wake_worker(pool);
2889                 }
2890         }
2891
2892         return 0;
2893 }
2894
2895 static void thin_postsuspend(struct dm_target *ti)
2896 {
2897         if (dm_noflush_suspending(ti))
2898                 requeue_io((struct thin_c *)ti->private);
2899 }
2900
2901 /*
2902  * <nr mapped sectors> <highest mapped sector>
2903  */
2904 static void thin_status(struct dm_target *ti, status_type_t type,
2905                         unsigned status_flags, char *result, unsigned maxlen)
2906 {
2907         int r;
2908         ssize_t sz = 0;
2909         dm_block_t mapped, highest;
2910         char buf[BDEVNAME_SIZE];
2911         struct thin_c *tc = ti->private;
2912
2913         if (get_pool_mode(tc->pool) == PM_FAIL) {
2914                 DMEMIT("Fail");
2915                 return;
2916         }
2917
2918         if (!tc->td)
2919                 DMEMIT("-");
2920         else {
2921                 switch (type) {
2922                 case STATUSTYPE_INFO:
2923                         r = dm_thin_get_mapped_count(tc->td, &mapped);
2924                         if (r) {
2925                                 DMERR("dm_thin_get_mapped_count returned %d", r);
2926                                 goto err;
2927                         }
2928
2929                         r = dm_thin_get_highest_mapped_block(tc->td, &highest);
2930                         if (r < 0) {
2931                                 DMERR("dm_thin_get_highest_mapped_block returned %d", r);
2932                                 goto err;
2933                         }
2934
2935                         DMEMIT("%llu ", mapped * tc->pool->sectors_per_block);
2936                         if (r)
2937                                 DMEMIT("%llu", ((highest + 1) *
2938                                                 tc->pool->sectors_per_block) - 1);
2939                         else
2940                                 DMEMIT("-");
2941                         break;
2942
2943                 case STATUSTYPE_TABLE:
2944                         DMEMIT("%s %lu",
2945                                format_dev_t(buf, tc->pool_dev->bdev->bd_dev),
2946                                (unsigned long) tc->dev_id);
2947                         if (tc->origin_dev)
2948                                 DMEMIT(" %s", format_dev_t(buf, tc->origin_dev->bdev->bd_dev));
2949                         break;
2950                 }
2951         }
2952
2953         return;
2954
2955 err:
2956         DMEMIT("Error");
2957 }
2958
2959 static int thin_iterate_devices(struct dm_target *ti,
2960                                 iterate_devices_callout_fn fn, void *data)
2961 {
2962         sector_t blocks;
2963         struct thin_c *tc = ti->private;
2964         struct pool *pool = tc->pool;
2965
2966         /*
2967          * We can't call dm_pool_get_data_dev_size() since that blocks.  So
2968          * we follow a more convoluted path through to the pool's target.
2969          */
2970         if (!pool->ti)
2971                 return 0;       /* nothing is bound */
2972
2973         blocks = pool->ti->len;
2974         (void) sector_div(blocks, pool->sectors_per_block);
2975         if (blocks)
2976                 return fn(ti, tc->pool_dev, 0, pool->sectors_per_block * blocks, data);
2977
2978         return 0;
2979 }
2980
2981 static struct target_type thin_target = {
2982         .name = "thin",
2983         .version = {1, 8, 0},
2984         .module = THIS_MODULE,
2985         .ctr = thin_ctr,
2986         .dtr = thin_dtr,
2987         .map = thin_map,
2988         .end_io = thin_endio,
2989         .postsuspend = thin_postsuspend,
2990         .status = thin_status,
2991         .iterate_devices = thin_iterate_devices,
2992 };
2993
2994 /*----------------------------------------------------------------*/
2995
2996 static int __init dm_thin_init(void)
2997 {
2998         int r;
2999
3000         pool_table_init();
3001
3002         r = dm_register_target(&thin_target);
3003         if (r)
3004                 return r;
3005
3006         r = dm_register_target(&pool_target);
3007         if (r)
3008                 goto bad_pool_target;
3009
3010         r = -ENOMEM;
3011
3012         _new_mapping_cache = KMEM_CACHE(dm_thin_new_mapping, 0);
3013         if (!_new_mapping_cache)
3014                 goto bad_new_mapping_cache;
3015
3016         return 0;
3017
3018 bad_new_mapping_cache:
3019         dm_unregister_target(&pool_target);
3020 bad_pool_target:
3021         dm_unregister_target(&thin_target);
3022
3023         return r;
3024 }
3025
3026 static void dm_thin_exit(void)
3027 {
3028         dm_unregister_target(&thin_target);
3029         dm_unregister_target(&pool_target);
3030
3031         kmem_cache_destroy(_new_mapping_cache);
3032 }
3033
3034 module_init(dm_thin_init);
3035 module_exit(dm_thin_exit);
3036
3037 MODULE_DESCRIPTION(DM_NAME " thin provisioning target");
3038 MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
3039 MODULE_LICENSE("GPL");