dm thin: rename cell_defer_except to cell_defer_no_holder
drivers/md/dm-thin.c
1 /*
2  * Copyright (C) 2011-2012 Red Hat UK.
3  *
4  * This file is released under the GPL.
5  */
6
7 #include "dm-thin-metadata.h"
8 #include "dm-bio-prison.h"
9 #include "dm.h"
10
11 #include <linux/device-mapper.h>
12 #include <linux/dm-io.h>
13 #include <linux/dm-kcopyd.h>
14 #include <linux/list.h>
15 #include <linux/init.h>
16 #include <linux/module.h>
17 #include <linux/slab.h>
18
19 #define DM_MSG_PREFIX   "thin"
20
21 /*
22  * Tunable constants
23  */
24 #define ENDIO_HOOK_POOL_SIZE 1024
25 #define MAPPING_POOL_SIZE 1024
26 #define PRISON_CELLS 1024
27 #define COMMIT_PERIOD HZ
28
29 /*
30  * The block size of the device holding pool data must be
31  * between 64KB and 1GB.
32  */
33 #define DATA_DEV_BLOCK_SIZE_MIN_SECTORS (64 * 1024 >> SECTOR_SHIFT)
34 #define DATA_DEV_BLOCK_SIZE_MAX_SECTORS (1024 * 1024 * 1024 >> SECTOR_SHIFT)
35
36 /*
37  * Device id is restricted to 24 bits.
38  */
39 #define MAX_DEV_ID ((1 << 24) - 1)
40
41 /*
42  * How do we handle breaking sharing of data blocks?
43  * =================================================
44  *
45  * We use a standard copy-on-write btree to store the mappings for the
46  * devices (note I'm talking about copy-on-write of the metadata here, not
47  * the data).  When you take an internal snapshot you clone the root node
48  * of the origin btree.  After this there is no concept of an origin or a
49  * snapshot.  They are just two device trees that happen to point to the
50  * same data blocks.
51  *
52  * When we get a write in we decide if it's to a shared data block using
53  * some timestamp magic.  If it is, we have to break sharing.
54  *
55  * Let's say we write to a shared block in what was the origin.  The
56  * steps are:
57  *
58  * i) plug further io to this physical block.  (see bio_prison code).
59  *
60  * ii) quiesce any read io to that shared data block.  Obviously
61  * including all devices that share this block.  (see dm_deferred_set code)
62  *
63  * iii) copy the data block to a newly allocated block.  This step can be
64  * skipped if the io covers the whole block. (schedule_copy).
65  *
66  * iv) insert the new mapping into the origin's btree
67  * (process_prepared_mapping).  This act of inserting breaks some
68  * sharing of btree nodes between the two devices.  Breaking sharing only
69  * affects the btree of that specific device.  Btrees for the other
70  * devices that share the block never change.  The btree for the origin
71  * device as it was after the last commit is untouched, ie. we're using
72  * persistent data structures in the functional programming sense.
73  *
74  * v) unplug io to this physical block, including the io that triggered
75  * the breaking of sharing.
76  *
77  * Steps (ii) and (iii) occur in parallel.
78  *
79  * The metadata _doesn't_ need to be committed before the io continues.  We
80  * get away with this because the io is always written to a _new_ block.
81  * If there's a crash, then:
82  *
83  * - The origin mapping will point to the old origin block (the shared
84  * one).  This will contain the data as it was before the io that triggered
85  * the breaking of sharing came in.
86  *
87  * - The snap mapping still points to the old block.  As it would after
88  * the commit.
89  *
90  * The downside of this scheme is that the timestamp magic isn't perfect:
91  * it will continue to think that the data block in the snapshot device is
92  * shared even after the write to the origin has broken sharing.  I suspect data
93  * blocks will typically be shared by many different devices, so we're
94  * breaking sharing n + 1 times, rather than n, where n is the number of
95  * devices that reference this data block.  At the moment I think the
96  * benefits far, far outweigh the disadvantages.
97  */
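
/*
 * A rough map from the steps above to the functions below:
 *
 *   (i)   the bio prison (dm_bio_detain)
 *   (ii)  the dm_deferred_set code (shared_read_ds)
 *   (iii) schedule_copy()
 *   (iv)  process_prepared_mapping()
 *   (v)   cell_defer() / cell_defer_no_holder()
 */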
98
99 /*----------------------------------------------------------------*/
100
101 /*
102  * Key building.
103  */
104 static void build_data_key(struct dm_thin_device *td,
105                            dm_block_t b, struct dm_cell_key *key)
106 {
107         key->virtual = 0;
108         key->dev = dm_thin_dev_id(td);
109         key->block = b;
110 }
111
112 static void build_virtual_key(struct dm_thin_device *td, dm_block_t b,
113                               struct dm_cell_key *key)
114 {
115         key->virtual = 1;
116         key->dev = dm_thin_dev_id(td);
117         key->block = b;
118 }
119
120 /*----------------------------------------------------------------*/
121
122 /*
123  * A pool device ties together a metadata device and a data device.  It
124  * also provides the interface for creating and destroying internal
125  * devices.
126  */
127 struct dm_thin_new_mapping;
128
129 /*
130  * The pool runs in 3 modes, ordered from least to most degraded so that the modes can be compared.
131  */
132 enum pool_mode {
133         PM_WRITE,               /* metadata may be changed */
134         PM_READ_ONLY,           /* metadata may not be changed */
135         PM_FAIL,                /* all I/O fails */
136 };
137
138 struct pool_features {
139         enum pool_mode mode;
140
141         bool zero_new_blocks:1;
142         bool discard_enabled:1;
143         bool discard_passdown:1;
144 };
145
146 struct thin_c;
147 typedef void (*process_bio_fn)(struct thin_c *tc, struct bio *bio);
148 typedef void (*process_mapping_fn)(struct dm_thin_new_mapping *m);
149
150 struct pool {
151         struct list_head list;
152         struct dm_target *ti;   /* Only set if a pool target is bound */
153
154         struct mapped_device *pool_md;
155         struct block_device *md_dev;
156         struct dm_pool_metadata *pmd;
157
158         dm_block_t low_water_blocks;
159         uint32_t sectors_per_block;
160         int sectors_per_block_shift;
161
162         struct pool_features pf;
163         unsigned low_water_triggered:1; /* A dm event has been sent */
164         unsigned no_free_space:1;       /* A -ENOSPC warning has been issued */
165
166         struct dm_bio_prison *prison;
167         struct dm_kcopyd_client *copier;
168
169         struct workqueue_struct *wq;
170         struct work_struct worker;
171         struct delayed_work waker;
172
173         unsigned long last_commit_jiffies;
174         unsigned ref_count;
175
176         spinlock_t lock;
177         struct bio_list deferred_bios;
178         struct bio_list deferred_flush_bios;
179         struct list_head prepared_mappings;
180         struct list_head prepared_discards;
181
182         struct bio_list retry_on_resume_list;
183
184         struct dm_deferred_set *shared_read_ds;
185         struct dm_deferred_set *all_io_ds;
186
187         struct dm_thin_new_mapping *next_mapping;
188         mempool_t *mapping_pool;
189         mempool_t *endio_hook_pool;
190
191         process_bio_fn process_bio;
192         process_bio_fn process_discard;
193
194         process_mapping_fn process_prepared_mapping;
195         process_mapping_fn process_prepared_discard;
196 };
197
198 static enum pool_mode get_pool_mode(struct pool *pool);
199 static void set_pool_mode(struct pool *pool, enum pool_mode mode);
200
201 /*
202  * Target context for a pool.
203  */
204 struct pool_c {
205         struct dm_target *ti;
206         struct pool *pool;
207         struct dm_dev *data_dev;
208         struct dm_dev *metadata_dev;
209         struct dm_target_callbacks callbacks;
210
211         dm_block_t low_water_blocks;
212         struct pool_features requested_pf; /* Features requested during table load */
213         struct pool_features adjusted_pf;  /* Features used after adjusting for constituent devices */
214 };
215
216 /*
217  * Target context for a thin.
218  */
219 struct thin_c {
220         struct dm_dev *pool_dev;
221         struct dm_dev *origin_dev;
222         dm_thin_id dev_id;
223
224         struct pool *pool;
225         struct dm_thin_device *td;
226 };
227
228 /*----------------------------------------------------------------*/
229
230 /*
231  * A global list of pools that uses a struct mapped_device as a key.
232  */
233 static struct dm_thin_pool_table {
234         struct mutex mutex;
235         struct list_head pools;
236 } dm_thin_pool_table;
237
238 static void pool_table_init(void)
239 {
240         mutex_init(&dm_thin_pool_table.mutex);
241         INIT_LIST_HEAD(&dm_thin_pool_table.pools);
242 }
243
244 static void __pool_table_insert(struct pool *pool)
245 {
246         BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));
247         list_add(&pool->list, &dm_thin_pool_table.pools);
248 }
249
250 static void __pool_table_remove(struct pool *pool)
251 {
252         BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));
253         list_del(&pool->list);
254 }
255
256 static struct pool *__pool_table_lookup(struct mapped_device *md)
257 {
258         struct pool *pool = NULL, *tmp;
259
260         BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));
261
262         list_for_each_entry(tmp, &dm_thin_pool_table.pools, list) {
263                 if (tmp->pool_md == md) {
264                         pool = tmp;
265                         break;
266                 }
267         }
268
269         return pool;
270 }
271
272 static struct pool *__pool_table_lookup_metadata_dev(struct block_device *md_dev)
273 {
274         struct pool *pool = NULL, *tmp;
275
276         BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));
277
278         list_for_each_entry(tmp, &dm_thin_pool_table.pools, list) {
279                 if (tmp->md_dev == md_dev) {
280                         pool = tmp;
281                         break;
282                 }
283         }
284
285         return pool;
286 }
287
288 /*----------------------------------------------------------------*/
289
290 struct dm_thin_endio_hook {
291         struct thin_c *tc;
292         struct dm_deferred_entry *shared_read_entry;
293         struct dm_deferred_entry *all_io_entry;
294         struct dm_thin_new_mapping *overwrite_mapping;
295 };
296
297 static void __requeue_bio_list(struct thin_c *tc, struct bio_list *master)
298 {
299         struct bio *bio;
300         struct bio_list bios;
301
302         bio_list_init(&bios);
303         bio_list_merge(&bios, master);
304         bio_list_init(master);
305
306         while ((bio = bio_list_pop(&bios))) {
307                 struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr;
308
309                 if (h->tc == tc)
310                         bio_endio(bio, DM_ENDIO_REQUEUE);
311                 else
312                         bio_list_add(master, bio);
313         }
314 }
315
316 static void requeue_io(struct thin_c *tc)
317 {
318         struct pool *pool = tc->pool;
319         unsigned long flags;
320
321         spin_lock_irqsave(&pool->lock, flags);
322         __requeue_bio_list(tc, &pool->deferred_bios);
323         __requeue_bio_list(tc, &pool->retry_on_resume_list);
324         spin_unlock_irqrestore(&pool->lock, flags);
325 }
326
327 /*
328  * This section of code contains the logic for processing a thin device's IO.
329  * Much of the code depends on pool object resources (lists, workqueues, etc)
330  * but most is exclusively called from the thin target rather than the thin-pool
331  * target.
332  */
333
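/*
 * Map a bio's starting sector to its block number within the thin device.
 * A power-of-two block size can be handled with a shift; otherwise a
 * 64-bit division is needed.
 */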
334 static dm_block_t get_bio_block(struct thin_c *tc, struct bio *bio)
335 {
336         sector_t block_nr = bio->bi_sector;
337
338         if (tc->pool->sectors_per_block_shift < 0)
339                 (void) sector_div(block_nr, tc->pool->sectors_per_block);
340         else
341                 block_nr >>= tc->pool->sectors_per_block_shift;
342
343         return block_nr;
344 }
345
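/*
 * Redirect a bio to the pool's data device, preserving its offset within
 * the block.
 */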
346 static void remap(struct thin_c *tc, struct bio *bio, dm_block_t block)
347 {
348         struct pool *pool = tc->pool;
349         sector_t bi_sector = bio->bi_sector;
350
351         bio->bi_bdev = tc->pool_dev->bdev;
352         if (tc->pool->sectors_per_block_shift < 0)
353                 bio->bi_sector = (block * pool->sectors_per_block) +
354                                  sector_div(bi_sector, pool->sectors_per_block);
355         else
356                 bio->bi_sector = (block << pool->sectors_per_block_shift) |
357                                 (bi_sector & (pool->sectors_per_block - 1));
358 }
359
360 static void remap_to_origin(struct thin_c *tc, struct bio *bio)
361 {
362         bio->bi_bdev = tc->origin_dev->bdev;
363 }
364
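/*
 * FLUSH/FUA bios that arrive while the current transaction has uncommitted
 * metadata changes must trigger a commit before they are issued.
 */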
365 static int bio_triggers_commit(struct thin_c *tc, struct bio *bio)
366 {
367         return (bio->bi_rw & (REQ_FLUSH | REQ_FUA)) &&
368                 dm_thin_changed_this_transaction(tc->td);
369 }
370
371 static void inc_all_io_entry(struct pool *pool, struct bio *bio)
372 {
373         struct dm_thin_endio_hook *h;
374
375         if (bio->bi_rw & REQ_DISCARD)
376                 return;
377
378         h = dm_get_mapinfo(bio)->ptr;
379         h->all_io_entry = dm_deferred_entry_inc(pool->all_io_ds);
380 }
381
382 static void issue(struct thin_c *tc, struct bio *bio)
383 {
384         struct pool *pool = tc->pool;
385         unsigned long flags;
386
387         if (!bio_triggers_commit(tc, bio)) {
388                 generic_make_request(bio);
389                 return;
390         }
391
392         /*
393          * Complete the bio with an error if earlier I/O caused changes to
394          * the metadata that can't be committed, e.g. due to I/O errors
395          * on the metadata device.
396          */
397         if (dm_thin_aborted_changes(tc->td)) {
398                 bio_io_error(bio);
399                 return;
400         }
401
402         /*
403          * Batch together any bios that trigger commits and then issue a
404          * single commit for them in process_deferred_bios().
405          */
406         spin_lock_irqsave(&pool->lock, flags);
407         bio_list_add(&pool->deferred_flush_bios, bio);
408         spin_unlock_irqrestore(&pool->lock, flags);
409 }
410
411 static void remap_to_origin_and_issue(struct thin_c *tc, struct bio *bio)
412 {
413         remap_to_origin(tc, bio);
414         issue(tc, bio);
415 }
416
417 static void remap_and_issue(struct thin_c *tc, struct bio *bio,
418                             dm_block_t block)
419 {
420         remap(tc, bio, block);
421         issue(tc, bio);
422 }
423
424 /*
425  * wake_worker() is used when new work is queued and when pool_resume is
426  * ready to continue deferred IO processing.
427  */
428 static void wake_worker(struct pool *pool)
429 {
430         queue_work(pool->wq, &pool->worker);
431 }
432
433 /*----------------------------------------------------------------*/
434
435 /*
436  * Bio endio functions.
437  */
438 struct dm_thin_new_mapping {
439         struct list_head list;
440
441         unsigned quiesced:1;
442         unsigned prepared:1;
443         unsigned pass_discard:1;
444
445         struct thin_c *tc;
446         dm_block_t virt_block;
447         dm_block_t data_block;
448         struct dm_bio_prison_cell *cell, *cell2;
449         int err;
450
451         /*
452          * If the bio covers the whole area of a block then we can avoid
453          * zeroing or copying.  Instead this bio is hooked.  The bio will
454          * still be in the cell, so care has to be taken to avoid issuing
455          * the bio twice.
456          */
457         struct bio *bio;
458         bio_end_io_t *saved_bi_end_io;
459 };
460
461 static void __maybe_add_mapping(struct dm_thin_new_mapping *m)
462 {
463         struct pool *pool = m->tc->pool;
464
465         if (m->quiesced && m->prepared) {
466                 list_add(&m->list, &pool->prepared_mappings);
467                 wake_worker(pool);
468         }
469 }
470
471 static void copy_complete(int read_err, unsigned long write_err, void *context)
472 {
473         unsigned long flags;
474         struct dm_thin_new_mapping *m = context;
475         struct pool *pool = m->tc->pool;
476
477         m->err = read_err || write_err ? -EIO : 0;
478
479         spin_lock_irqsave(&pool->lock, flags);
480         m->prepared = 1;
481         __maybe_add_mapping(m);
482         spin_unlock_irqrestore(&pool->lock, flags);
483 }
484
485 static void overwrite_endio(struct bio *bio, int err)
486 {
487         unsigned long flags;
488         struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr;
489         struct dm_thin_new_mapping *m = h->overwrite_mapping;
490         struct pool *pool = m->tc->pool;
491
492         m->err = err;
493
494         spin_lock_irqsave(&pool->lock, flags);
495         m->prepared = 1;
496         __maybe_add_mapping(m);
497         spin_unlock_irqrestore(&pool->lock, flags);
498 }
499
500 /*----------------------------------------------------------------*/
501
502 /*
503  * Workqueue.
504  */
505
506 /*
507  * Prepared mapping jobs.
508  */
509
510 /*
511  * This sends the bios in the cell back to the deferred_bios list.
512  */
513 static void cell_defer(struct thin_c *tc, struct dm_bio_prison_cell *cell,
514                        dm_block_t data_block)
515 {
516         struct pool *pool = tc->pool;
517         unsigned long flags;
518
519         spin_lock_irqsave(&pool->lock, flags);
520         dm_cell_release(cell, &pool->deferred_bios);
521         spin_unlock_irqrestore(&pool->lock, flags);
522
523         wake_worker(pool);
524 }
525
526 /*
527  * Same as cell_defer except it omits the original holder of the cell.
528  */
529 static void cell_defer_no_holder(struct thin_c *tc, struct dm_bio_prison_cell *cell)
530 {
531         struct bio_list bios;
532         struct pool *pool = tc->pool;
533         unsigned long flags;
534
535         bio_list_init(&bios);
536
537         spin_lock_irqsave(&pool->lock, flags);
538         dm_cell_release_no_holder(cell, &pool->deferred_bios);
539         spin_unlock_irqrestore(&pool->lock, flags);
540
541         wake_worker(pool);
542 }
543
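/*
 * Preparing the mapping failed: restore the bio's bi_end_io, error all
 * bios held in the cell and free the mapping.
 */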
544 static void process_prepared_mapping_fail(struct dm_thin_new_mapping *m)
545 {
546         if (m->bio)
547                 m->bio->bi_end_io = m->saved_bi_end_io;
548         dm_cell_error(m->cell);
549         list_del(&m->list);
550         mempool_free(m, m->tc->pool->mapping_pool);
551 }
552 static void process_prepared_mapping(struct dm_thin_new_mapping *m)
553 {
554         struct thin_c *tc = m->tc;
555         struct bio *bio;
556         int r;
557
558         bio = m->bio;
559         if (bio)
560                 bio->bi_end_io = m->saved_bi_end_io;
561
562         if (m->err) {
563                 dm_cell_error(m->cell);
564                 goto out;
565         }
566
567         /*
568          * Commit the prepared block into the mapping btree.
569          * Any I/O for this block arriving after this point will get
570          * remapped to it directly.
571          */
572         r = dm_thin_insert_block(tc->td, m->virt_block, m->data_block);
573         if (r) {
574                 DMERR("dm_thin_insert_block() failed");
575                 dm_cell_error(m->cell);
576                 goto out;
577         }
578
579         /*
580          * Release any bios held while the block was being provisioned.
581          * If we are processing a write bio that completely covers the block,
583          * we already processed it so we can ignore it now when processing
583          * the bios in the cell.
584          */
585         if (bio) {
586                 cell_defer_no_holder(tc, m->cell);
587                 bio_endio(bio, 0);
588         } else
589                 cell_defer(tc, m->cell, m->data_block);
590
591 out:
592         list_del(&m->list);
593         mempool_free(m, tc->pool->mapping_pool);
594 }
595
596 static void process_prepared_discard_fail(struct dm_thin_new_mapping *m)
597 {
598         struct thin_c *tc = m->tc;
599
600         bio_io_error(m->bio);
601         cell_defer_no_holder(tc, m->cell);
602         cell_defer_no_holder(tc, m->cell2);
603         mempool_free(m, tc->pool->mapping_pool);
604 }
605
606 static void process_prepared_discard_passdown(struct dm_thin_new_mapping *m)
607 {
608         struct thin_c *tc = m->tc;
609
610         inc_all_io_entry(tc->pool, m->bio);
611         cell_defer_no_holder(tc, m->cell);
612         cell_defer_no_holder(tc, m->cell2);
613
614         if (m->pass_discard)
615                 remap_and_issue(tc, m->bio, m->data_block);
616         else
617                 bio_endio(m->bio, 0);
618
619         mempool_free(m, tc->pool->mapping_pool);
620 }
621
622 static void process_prepared_discard(struct dm_thin_new_mapping *m)
623 {
624         int r;
625         struct thin_c *tc = m->tc;
626
627         r = dm_thin_remove_block(tc->td, m->virt_block);
628         if (r)
629                 DMERR("dm_thin_remove_block() failed");
630
631         process_prepared_discard_passdown(m);
632 }
633
634 static void process_prepared(struct pool *pool, struct list_head *head,
635                              process_mapping_fn *fn)
636 {
637         unsigned long flags;
638         struct list_head maps;
639         struct dm_thin_new_mapping *m, *tmp;
640
641         INIT_LIST_HEAD(&maps);
642         spin_lock_irqsave(&pool->lock, flags);
643         list_splice_init(head, &maps);
644         spin_unlock_irqrestore(&pool->lock, flags);
645
646         list_for_each_entry_safe(m, tmp, &maps, list)
647                 (*fn)(m);
648 }
649
650 /*
651  * Deferred bio jobs.
652  */
653 static int io_overlaps_block(struct pool *pool, struct bio *bio)
654 {
655         return bio->bi_size == (pool->sectors_per_block << SECTOR_SHIFT);
656 }
657
658 static int io_overwrites_block(struct pool *pool, struct bio *bio)
659 {
660         return (bio_data_dir(bio) == WRITE) &&
661                 io_overlaps_block(pool, bio);
662 }
663
664 static void save_and_set_endio(struct bio *bio, bio_end_io_t **save,
665                                bio_end_io_t *fn)
666 {
667         *save = bio->bi_end_io;
668         bio->bi_end_io = fn;
669 }
670
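/*
 * Make sure a new_mapping struct is available before processing a bio that
 * may need one.  GFP_ATOMIC is used so the worker never sleeps here; on
 * failure the deferred bios are re-queued and retried later.
 */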
671 static int ensure_next_mapping(struct pool *pool)
672 {
673         if (pool->next_mapping)
674                 return 0;
675
676         pool->next_mapping = mempool_alloc(pool->mapping_pool, GFP_ATOMIC);
677
678         return pool->next_mapping ? 0 : -ENOMEM;
679 }
680
681 static struct dm_thin_new_mapping *get_next_mapping(struct pool *pool)
682 {
683         struct dm_thin_new_mapping *r = pool->next_mapping;
684
685         BUG_ON(!pool->next_mapping);
686
687         pool->next_mapping = NULL;
688
689         return r;
690 }
691
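/*
 * Set up a mapping that copies data_origin to data_dest.  If the bio
 * overwrites the whole block the copy is skipped: the bio itself is
 * remapped and issued, with its bi_end_io hooked so that completion marks
 * the mapping as prepared.
 */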
692 static void schedule_copy(struct thin_c *tc, dm_block_t virt_block,
693                           struct dm_dev *origin, dm_block_t data_origin,
694                           dm_block_t data_dest,
695                           struct dm_bio_prison_cell *cell, struct bio *bio)
696 {
697         int r;
698         struct pool *pool = tc->pool;
699         struct dm_thin_new_mapping *m = get_next_mapping(pool);
700
701         INIT_LIST_HEAD(&m->list);
702         m->quiesced = 0;
703         m->prepared = 0;
704         m->tc = tc;
705         m->virt_block = virt_block;
706         m->data_block = data_dest;
707         m->cell = cell;
708         m->err = 0;
709         m->bio = NULL;
710
711         if (!dm_deferred_set_add_work(pool->shared_read_ds, &m->list))
712                 m->quiesced = 1;
713
714         /*
715          * IO to pool_dev remaps to the pool target's data_dev.
716          *
717          * If the whole block of data is being overwritten, we can issue the
718          * bio immediately. Otherwise we use kcopyd to clone the data first.
719          */
720         if (io_overwrites_block(pool, bio)) {
721                 struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr;
722
723                 h->overwrite_mapping = m;
724                 m->bio = bio;
725                 save_and_set_endio(bio, &m->saved_bi_end_io, overwrite_endio);
726                 inc_all_io_entry(pool, bio);
727                 remap_and_issue(tc, bio, data_dest);
728         } else {
729                 struct dm_io_region from, to;
730
731                 from.bdev = origin->bdev;
732                 from.sector = data_origin * pool->sectors_per_block;
733                 from.count = pool->sectors_per_block;
734
735                 to.bdev = tc->pool_dev->bdev;
736                 to.sector = data_dest * pool->sectors_per_block;
737                 to.count = pool->sectors_per_block;
738
739                 r = dm_kcopyd_copy(pool->copier, &from, 1, &to,
740                                    0, copy_complete, m);
741                 if (r < 0) {
742                         mempool_free(m, pool->mapping_pool);
743                         DMERR("dm_kcopyd_copy() failed");
744                         dm_cell_error(cell);
745                 }
746         }
747 }
748
749 static void schedule_internal_copy(struct thin_c *tc, dm_block_t virt_block,
750                                    dm_block_t data_origin, dm_block_t data_dest,
751                                    struct dm_bio_prison_cell *cell, struct bio *bio)
752 {
753         schedule_copy(tc, virt_block, tc->pool_dev,
754                       data_origin, data_dest, cell, bio);
755 }
756
757 static void schedule_external_copy(struct thin_c *tc, dm_block_t virt_block,
758                                    dm_block_t data_dest,
759                                    struct dm_bio_prison_cell *cell, struct bio *bio)
760 {
761         schedule_copy(tc, virt_block, tc->origin_dev,
762                       virt_block, data_dest, cell, bio);
763 }
764
765 static void schedule_zero(struct thin_c *tc, dm_block_t virt_block,
766                           dm_block_t data_block, struct dm_bio_prison_cell *cell,
767                           struct bio *bio)
768 {
769         struct pool *pool = tc->pool;
770         struct dm_thin_new_mapping *m = get_next_mapping(pool);
771
772         INIT_LIST_HEAD(&m->list);
773         m->quiesced = 1;
774         m->prepared = 0;
775         m->tc = tc;
776         m->virt_block = virt_block;
777         m->data_block = data_block;
778         m->cell = cell;
779         m->err = 0;
780         m->bio = NULL;
781
782         /*
783          * If the whole block of data is being overwritten or we are not
784          * zeroing pre-existing data, we can issue the bio immediately.
785          * Otherwise we use kcopyd to zero the data first.
786          */
787         if (!pool->pf.zero_new_blocks)
788                 process_prepared_mapping(m);
789
790         else if (io_overwrites_block(pool, bio)) {
791                 struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr;
792
793                 h->overwrite_mapping = m;
794                 m->bio = bio;
795                 save_and_set_endio(bio, &m->saved_bi_end_io, overwrite_endio);
796                 inc_all_io_entry(pool, bio);
797                 remap_and_issue(tc, bio, data_block);
798         } else {
799                 int r;
800                 struct dm_io_region to;
801
802                 to.bdev = tc->pool_dev->bdev;
803                 to.sector = data_block * pool->sectors_per_block;
804                 to.count = pool->sectors_per_block;
805
806                 r = dm_kcopyd_zero(pool->copier, 1, &to, 0, copy_complete, m);
807                 if (r < 0) {
808                         mempool_free(m, pool->mapping_pool);
809                         DMERR("dm_kcopyd_zero() failed");
810                         dm_cell_error(cell);
811                 }
812         }
813 }
814
815 static int commit(struct pool *pool)
816 {
817         int r;
818
819         r = dm_pool_commit_metadata(pool->pmd);
820         if (r)
821                 DMERR("commit failed, error = %d", r);
822
823         return r;
824 }
825
826 /*
827  * A non-zero return indicates read_only or fail_io mode.
828  * Many callers don't care about the return value.
829  */
830 static int commit_or_fallback(struct pool *pool)
831 {
832         int r;
833
834         if (get_pool_mode(pool) != PM_WRITE)
835                 return -EINVAL;
836
837         r = commit(pool);
838         if (r)
839                 set_pool_mode(pool, PM_READ_ONLY);
840
841         return r;
842 }
843
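/*
 * Allocate a new data block.  Sends a dm event when the free-block count
 * first drops to the low-water mark, and tries a metadata commit to
 * reclaim space before giving up with -ENOSPC.
 */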
844 static int alloc_data_block(struct thin_c *tc, dm_block_t *result)
845 {
846         int r;
847         dm_block_t free_blocks;
848         unsigned long flags;
849         struct pool *pool = tc->pool;
850
851         r = dm_pool_get_free_block_count(pool->pmd, &free_blocks);
852         if (r)
853                 return r;
854
855         if (free_blocks <= pool->low_water_blocks && !pool->low_water_triggered) {
856                 DMWARN("%s: reached low water mark, sending event.",
857                        dm_device_name(pool->pool_md));
858                 spin_lock_irqsave(&pool->lock, flags);
859                 pool->low_water_triggered = 1;
860                 spin_unlock_irqrestore(&pool->lock, flags);
861                 dm_table_event(pool->ti->table);
862         }
863
864         if (!free_blocks) {
865                 if (pool->no_free_space)
866                         return -ENOSPC;
867                 else {
868                         /*
869                          * Try to commit to see if that will free up some
870                          * more space.
871                          */
872                         (void) commit_or_fallback(pool);
873
874                         r = dm_pool_get_free_block_count(pool->pmd, &free_blocks);
875                         if (r)
876                                 return r;
877
878                         /*
879                          * If we still have no space we set a flag to avoid
880                          * doing all this checking and return -ENOSPC.
881                          */
882                         if (!free_blocks) {
883                                 DMWARN("%s: no free space available.",
884                                        dm_device_name(pool->pool_md));
885                                 spin_lock_irqsave(&pool->lock, flags);
886                                 pool->no_free_space = 1;
887                                 spin_unlock_irqrestore(&pool->lock, flags);
888                                 return -ENOSPC;
889                         }
890                 }
891         }
892
893         r = dm_pool_alloc_data_block(pool->pmd, result);
894         if (r)
895                 return r;
896
897         return 0;
898 }
899
900 /*
901  * If we have run out of space, queue bios until the device is
902  * resumed, presumably after having been reloaded with more space.
903  */
904 static void retry_on_resume(struct bio *bio)
905 {
906         struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr;
907         struct thin_c *tc = h->tc;
908         struct pool *pool = tc->pool;
909         unsigned long flags;
910
911         spin_lock_irqsave(&pool->lock, flags);
912         bio_list_add(&pool->retry_on_resume_list, bio);
913         spin_unlock_irqrestore(&pool->lock, flags);
914 }
915
916 static void no_space(struct dm_bio_prison_cell *cell)
917 {
918         struct bio *bio;
919         struct bio_list bios;
920
921         bio_list_init(&bios);
922         dm_cell_release(cell, &bios);
923
924         while ((bio = bio_list_pop(&bios)))
925                 retry_on_resume(bio);
926 }
927
928 static void process_discard(struct thin_c *tc, struct bio *bio)
929 {
930         int r;
931         unsigned long flags;
932         struct pool *pool = tc->pool;
933         struct dm_bio_prison_cell *cell, *cell2;
934         struct dm_cell_key key, key2;
935         dm_block_t block = get_bio_block(tc, bio);
936         struct dm_thin_lookup_result lookup_result;
937         struct dm_thin_new_mapping *m;
938
939         build_virtual_key(tc->td, block, &key);
940         if (dm_bio_detain(tc->pool->prison, &key, bio, &cell))
941                 return;
942
943         r = dm_thin_find_block(tc->td, block, 1, &lookup_result);
944         switch (r) {
945         case 0:
946                 /*
947                  * Check nobody is fiddling with this pool block.  This can
948                  * happen if someone's in the process of breaking sharing
949                  * on this block.
950                  */
951                 build_data_key(tc->td, lookup_result.block, &key2);
952                 if (dm_bio_detain(tc->pool->prison, &key2, bio, &cell2)) {
953                         cell_defer_no_holder(tc, cell);
954                         break;
955                 }
956
957                 if (io_overlaps_block(pool, bio)) {
958                         /*
959                          * IO may still be going to the destination block.  We must
960                          * quiesce before we can do the removal.
961                          */
962                         m = get_next_mapping(pool);
963                         m->tc = tc;
964                         m->pass_discard = (!lookup_result.shared) && pool->pf.discard_passdown;
965                         m->virt_block = block;
966                         m->data_block = lookup_result.block;
967                         m->cell = cell;
968                         m->cell2 = cell2;
969                         m->err = 0;
970                         m->bio = bio;
971
972                         if (!dm_deferred_set_add_work(pool->all_io_ds, &m->list)) {
973                                 spin_lock_irqsave(&pool->lock, flags);
974                                 list_add(&m->list, &pool->prepared_discards);
975                                 spin_unlock_irqrestore(&pool->lock, flags);
976                                 wake_worker(pool);
977                         }
978                 } else {
979                         inc_all_io_entry(pool, bio);
980                         cell_defer_no_holder(tc, cell);
981                         cell_defer_no_holder(tc, cell2);
982
983                         /*
984                          * The DM core makes sure that the discard doesn't span
985                          * a block boundary, so the discard here covers only part of
986                          * a single block and is simply passed down or completed.
987                          */
988                         if ((!lookup_result.shared) && pool->pf.discard_passdown)
989                                 remap_and_issue(tc, bio, lookup_result.block);
990                         else
991                                 bio_endio(bio, 0);
992                 }
993                 break;
994
995         case -ENODATA:
996                 /*
997                  * It isn't provisioned, just forget it.
998                  */
999                 cell_defer_no_holder(tc, cell);
1000                 bio_endio(bio, 0);
1001                 break;
1002
1003         default:
1004                 DMERR("discard: find block unexpectedly returned %d", r);
1005                 cell_defer_no_holder(tc, cell);
1006                 bio_io_error(bio);
1007                 break;
1008         }
1009 }
1010
1011 static void break_sharing(struct thin_c *tc, struct bio *bio, dm_block_t block,
1012                           struct dm_cell_key *key,
1013                           struct dm_thin_lookup_result *lookup_result,
1014                           struct dm_bio_prison_cell *cell)
1015 {
1016         int r;
1017         dm_block_t data_block;
1018
1019         r = alloc_data_block(tc, &data_block);
1020         switch (r) {
1021         case 0:
1022                 schedule_internal_copy(tc, block, lookup_result->block,
1023                                        data_block, cell, bio);
1024                 break;
1025
1026         case -ENOSPC:
1027                 no_space(cell);
1028                 break;
1029
1030         default:
1031                 DMERR("%s: alloc_data_block() failed, error = %d", __func__, r);
1032                 dm_cell_error(cell);
1033                 break;
1034         }
1035 }
1036
1037 static void process_shared_bio(struct thin_c *tc, struct bio *bio,
1038                                dm_block_t block,
1039                                struct dm_thin_lookup_result *lookup_result)
1040 {
1041         struct dm_bio_prison_cell *cell;
1042         struct pool *pool = tc->pool;
1043         struct dm_cell_key key;
1044
1045         /*
1046          * If cell is already occupied, then sharing is already in the process
1047          * of being broken so we have nothing further to do here.
1048          */
1049         build_data_key(tc->td, lookup_result->block, &key);
1050         if (dm_bio_detain(pool->prison, &key, bio, &cell))
1051                 return;
1052
1053         if (bio_data_dir(bio) == WRITE && bio->bi_size)
1054                 break_sharing(tc, bio, block, &key, lookup_result, cell);
1055         else {
1056                 struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr;
1057
1058                 h->shared_read_entry = dm_deferred_entry_inc(pool->shared_read_ds);
1059                 inc_all_io_entry(pool, bio);
1060                 cell_defer_no_holder(tc, cell);
1061
1062                 remap_and_issue(tc, bio, lookup_result->block);
1063         }
1064 }
1065
1066 static void provision_block(struct thin_c *tc, struct bio *bio, dm_block_t block,
1067                             struct dm_bio_prison_cell *cell)
1068 {
1069         int r;
1070         dm_block_t data_block;
1071
1072         /*
1073          * Remap empty bios (flushes) immediately, without provisioning.
1074          */
1075         if (!bio->bi_size) {
1076                 inc_all_io_entry(tc->pool, bio);
1077                 cell_defer_no_holder(tc, cell);
1078
1079                 remap_and_issue(tc, bio, 0);
1080                 return;
1081         }
1082
1083         /*
1084          * Fill read bios with zeroes and complete them immediately.
1085          */
1086         if (bio_data_dir(bio) == READ) {
1087                 zero_fill_bio(bio);
1088                 cell_defer_no_holder(tc, cell);
1089                 bio_endio(bio, 0);
1090                 return;
1091         }
1092
1093         r = alloc_data_block(tc, &data_block);
1094         switch (r) {
1095         case 0:
1096                 if (tc->origin_dev)
1097                         schedule_external_copy(tc, block, data_block, cell, bio);
1098                 else
1099                         schedule_zero(tc, block, data_block, cell, bio);
1100                 break;
1101
1102         case -ENOSPC:
1103                 no_space(cell);
1104                 break;
1105
1106         default:
1107                 DMERR("%s: alloc_data_block() failed, error = %d", __func__, r);
1108                 set_pool_mode(tc->pool, PM_READ_ONLY);
1109                 dm_cell_error(cell);
1110                 break;
1111         }
1112 }
1113
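/*
 * Worker-side handling of a deferred bio while the pool is writable:
 * remap if the block is already mapped, break sharing if it is shared,
 * or provision a new block (or read from the origin) on -ENODATA.
 */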
1114 static void process_bio(struct thin_c *tc, struct bio *bio)
1115 {
1116         int r;
1117         dm_block_t block = get_bio_block(tc, bio);
1118         struct dm_bio_prison_cell *cell;
1119         struct dm_cell_key key;
1120         struct dm_thin_lookup_result lookup_result;
1121
1122         /*
1123          * If cell is already occupied, then the block is already
1124          * being provisioned so we have nothing further to do here.
1125          */
1126         build_virtual_key(tc->td, block, &key);
1127         if (dm_bio_detain(tc->pool->prison, &key, bio, &cell))
1128                 return;
1129
1130         r = dm_thin_find_block(tc->td, block, 1, &lookup_result);
1131         switch (r) {
1132         case 0:
1133                 if (lookup_result.shared) {
1134                         process_shared_bio(tc, bio, block, &lookup_result);
1135                         cell_defer_no_holder(tc, cell);
1136                 } else {
1137                         inc_all_io_entry(tc->pool, bio);
1138                         cell_defer_no_holder(tc, cell);
1139
1140                         remap_and_issue(tc, bio, lookup_result.block);
1141                 }
1142                 break;
1143
1144         case -ENODATA:
1145                 if (bio_data_dir(bio) == READ && tc->origin_dev) {
1146                         inc_all_io_entry(tc->pool, bio);
1147                         cell_defer_no_holder(tc, cell);
1148
1149                         remap_to_origin_and_issue(tc, bio);
1150                 } else
1151                         provision_block(tc, bio, block, cell);
1152                 break;
1153
1154         default:
1155                 DMERR("dm_thin_find_block() failed, error = %d", r);
1156                 cell_defer_no_holder(tc, cell);
1157                 bio_io_error(bio);
1158                 break;
1159         }
1160 }
1161
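/*
 * Read-only mode: reads are serviced as normal, but any write that would
 * require a metadata change (breaking sharing or provisioning) is failed.
 */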
1162 static void process_bio_read_only(struct thin_c *tc, struct bio *bio)
1163 {
1164         int r;
1165         int rw = bio_data_dir(bio);
1166         dm_block_t block = get_bio_block(tc, bio);
1167         struct dm_thin_lookup_result lookup_result;
1168
1169         r = dm_thin_find_block(tc->td, block, 1, &lookup_result);
1170         switch (r) {
1171         case 0:
1172                 if (lookup_result.shared && (rw == WRITE) && bio->bi_size)
1173                         bio_io_error(bio);
1174                 else {
1175                         inc_all_io_entry(tc->pool, bio);
1176                         remap_and_issue(tc, bio, lookup_result.block);
1177                 }
1178                 break;
1179
1180         case -ENODATA:
1181                 if (rw != READ) {
1182                         bio_io_error(bio);
1183                         break;
1184                 }
1185
1186                 if (tc->origin_dev) {
1187                         inc_all_io_entry(tc->pool, bio);
1188                         remap_to_origin_and_issue(tc, bio);
1189                         break;
1190                 }
1191
1192                 zero_fill_bio(bio);
1193                 bio_endio(bio, 0);
1194                 break;
1195
1196         default:
1197                 DMERR("dm_thin_find_block() failed, error = %d", r);
1198                 bio_io_error(bio);
1199                 break;
1200         }
1201 }
1202
1203 static void process_bio_fail(struct thin_c *tc, struct bio *bio)
1204 {
1205         bio_io_error(bio);
1206 }
1207
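/*
 * Returns non-zero once COMMIT_PERIOD has elapsed since the last commit
 * (the first clause also forces a commit if jiffies has wrapped).
 */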
1208 static int need_commit_due_to_time(struct pool *pool)
1209 {
1210         return jiffies < pool->last_commit_jiffies ||
1211                jiffies > pool->last_commit_jiffies + COMMIT_PERIOD;
1212 }
1213
1214 static void process_deferred_bios(struct pool *pool)
1215 {
1216         unsigned long flags;
1217         struct bio *bio;
1218         struct bio_list bios;
1219
1220         bio_list_init(&bios);
1221
1222         spin_lock_irqsave(&pool->lock, flags);
1223         bio_list_merge(&bios, &pool->deferred_bios);
1224         bio_list_init(&pool->deferred_bios);
1225         spin_unlock_irqrestore(&pool->lock, flags);
1226
1227         while ((bio = bio_list_pop(&bios))) {
1228                 struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr;
1229                 struct thin_c *tc = h->tc;
1230
1231                 /*
1232                  * If we've got no free new_mapping structs, and processing
1233                  * this bio might require one, we pause until there are some
1234                  * prepared mappings to process.
1235                  */
1236                 if (ensure_next_mapping(pool)) {
1237                         spin_lock_irqsave(&pool->lock, flags);
1238                         bio_list_merge(&pool->deferred_bios, &bios);
1239                         spin_unlock_irqrestore(&pool->lock, flags);
1240
1241                         break;
1242                 }
1243
1244                 if (bio->bi_rw & REQ_DISCARD)
1245                         pool->process_discard(tc, bio);
1246                 else
1247                         pool->process_bio(tc, bio);
1248         }
1249
1250         /*
1251          * If there are any deferred flush bios, we must commit
1252          * the metadata before issuing them.
1253          */
1254         bio_list_init(&bios);
1255         spin_lock_irqsave(&pool->lock, flags);
1256         bio_list_merge(&bios, &pool->deferred_flush_bios);
1257         bio_list_init(&pool->deferred_flush_bios);
1258         spin_unlock_irqrestore(&pool->lock, flags);
1259
1260         if (bio_list_empty(&bios) && !need_commit_due_to_time(pool))
1261                 return;
1262
1263         if (commit_or_fallback(pool)) {
1264                 while ((bio = bio_list_pop(&bios)))
1265                         bio_io_error(bio);
1266                 return;
1267         }
1268         pool->last_commit_jiffies = jiffies;
1269
1270         while ((bio = bio_list_pop(&bios)))
1271                 generic_make_request(bio);
1272 }
1273
1274 static void do_worker(struct work_struct *ws)
1275 {
1276         struct pool *pool = container_of(ws, struct pool, worker);
1277
1278         process_prepared(pool, &pool->prepared_mappings, &pool->process_prepared_mapping);
1279         process_prepared(pool, &pool->prepared_discards, &pool->process_prepared_discard);
1280         process_deferred_bios(pool);
1281 }
1282
1283 /*
1284  * We want to commit periodically so that not too much
1285  * unwritten data builds up.
1286  */
1287 static void do_waker(struct work_struct *ws)
1288 {
1289         struct pool *pool = container_of(to_delayed_work(ws), struct pool, waker);
1290         wake_worker(pool);
1291         queue_delayed_work(pool->wq, &pool->waker, COMMIT_PERIOD);
1292 }
1293
1294 /*----------------------------------------------------------------*/
1295
1296 static enum pool_mode get_pool_mode(struct pool *pool)
1297 {
1298         return pool->pf.mode;
1299 }
1300
1301 static void set_pool_mode(struct pool *pool, enum pool_mode mode)
1302 {
1303         int r;
1304
1305         pool->pf.mode = mode;
1306
1307         switch (mode) {
1308         case PM_FAIL:
1309                 DMERR("switching pool to failure mode");
1310                 pool->process_bio = process_bio_fail;
1311                 pool->process_discard = process_bio_fail;
1312                 pool->process_prepared_mapping = process_prepared_mapping_fail;
1313                 pool->process_prepared_discard = process_prepared_discard_fail;
1314                 break;
1315
1316         case PM_READ_ONLY:
1317                 DMERR("switching pool to read-only mode");
1318                 r = dm_pool_abort_metadata(pool->pmd);
1319                 if (r) {
1320                         DMERR("aborting transaction failed");
1321                         set_pool_mode(pool, PM_FAIL);
1322                 } else {
1323                         dm_pool_metadata_read_only(pool->pmd);
1324                         pool->process_bio = process_bio_read_only;
1325                         pool->process_discard = process_discard;
1326                         pool->process_prepared_mapping = process_prepared_mapping_fail;
1327                         pool->process_prepared_discard = process_prepared_discard_passdown;
1328                 }
1329                 break;
1330
1331         case PM_WRITE:
1332                 pool->process_bio = process_bio;
1333                 pool->process_discard = process_discard;
1334                 pool->process_prepared_mapping = process_prepared_mapping;
1335                 pool->process_prepared_discard = process_prepared_discard;
1336                 break;
1337         }
1338 }
1339
1340 /*----------------------------------------------------------------*/
1341
1342 /*
1343  * Mapping functions.
1344  */
1345
1346 /*
1347  * Called only while mapping a thin bio to hand it over to the workqueue.
1348  */
1349 static void thin_defer_bio(struct thin_c *tc, struct bio *bio)
1350 {
1351         unsigned long flags;
1352         struct pool *pool = tc->pool;
1353
1354         spin_lock_irqsave(&pool->lock, flags);
1355         bio_list_add(&pool->deferred_bios, bio);
1356         spin_unlock_irqrestore(&pool->lock, flags);
1357
1358         wake_worker(pool);
1359 }
1360
1361 static struct dm_thin_endio_hook *thin_hook_bio(struct thin_c *tc, struct bio *bio)
1362 {
1363         struct pool *pool = tc->pool;
1364         struct dm_thin_endio_hook *h = mempool_alloc(pool->endio_hook_pool, GFP_NOIO);
1365
1366         h->tc = tc;
1367         h->shared_read_entry = NULL;
1368         h->all_io_entry = NULL;
1369         h->overwrite_mapping = NULL;
1370
1371         return h;
1372 }
1373
1374 /*
1375  * Non-blocking function called from the thin target's map function.
1376  */
1377 static int thin_bio_map(struct dm_target *ti, struct bio *bio,
1378                         union map_info *map_context)
1379 {
1380         int r;
1381         struct thin_c *tc = ti->private;
1382         dm_block_t block = get_bio_block(tc, bio);
1383         struct dm_thin_device *td = tc->td;
1384         struct dm_thin_lookup_result result;
1385         struct dm_bio_prison_cell *cell1, *cell2;
1386         struct dm_cell_key key;
1387
1388         map_context->ptr = thin_hook_bio(tc, bio);
1389
1390         if (get_pool_mode(tc->pool) == PM_FAIL) {
1391                 bio_io_error(bio);
1392                 return DM_MAPIO_SUBMITTED;
1393         }
1394
1395         if (bio->bi_rw & (REQ_DISCARD | REQ_FLUSH | REQ_FUA)) {
1396                 thin_defer_bio(tc, bio);
1397                 return DM_MAPIO_SUBMITTED;
1398         }
1399
1400         r = dm_thin_find_block(td, block, 0, &result);
1401
1402         /*
1403          * Note that we defer readahead too.
1404          */
1405         switch (r) {
1406         case 0:
1407                 if (unlikely(result.shared)) {
1408                         /*
1409                          * We have a race condition here between the
1410                          * result.shared value returned by the lookup and
1411                          * snapshot creation, which may cause new
1412                          * sharing.
1413                          *
1414                          * To avoid this always quiesce the origin before
1415                          * taking the snap.  You want to do this anyway to
1416                          * ensure a consistent application view
1417                          * (i.e. lockfs).
1418                          *
1419                          * More distant ancestors are irrelevant. The
1420                          * shared flag will be set in their case.
1421                          */
1422                         thin_defer_bio(tc, bio);
1423                         return DM_MAPIO_SUBMITTED;
1424                 }
1425
1426                 build_virtual_key(tc->td, block, &key);
1427                 if (dm_bio_detain(tc->pool->prison, &key, bio, &cell1))
1428                         return DM_MAPIO_SUBMITTED;
1429
1430                 build_data_key(tc->td, result.block, &key);
1431                 if (dm_bio_detain(tc->pool->prison, &key, bio, &cell2)) {
1432                         cell_defer_no_holder(tc, cell1);
1433                         return DM_MAPIO_SUBMITTED;
1434                 }
1435
1436                 inc_all_io_entry(tc->pool, bio);
1437                 cell_defer_no_holder(tc, cell2);
1438                 cell_defer_no_holder(tc, cell1);
1439
1440                 remap(tc, bio, result.block);
1441                 return DM_MAPIO_REMAPPED;
1442
1443         case -ENODATA:
1444                 if (get_pool_mode(tc->pool) == PM_READ_ONLY) {
1445                         /*
1446                          * This block isn't provisioned, and we have no way
1447                          * of doing so.  Just error it.
1448                          */
1449                         bio_io_error(bio);
1450                         r = DM_MAPIO_SUBMITTED;
1451                         break;
1452                 }
1453                 /* fall through */
1454
1455         case -EWOULDBLOCK:
1456                 /*
1457                  * In future, the failed dm_thin_find_block above could
1458                  * provide the hint to load the metadata into cache.
1459                  */
1460                 thin_defer_bio(tc, bio);
1461                 r = DM_MAPIO_SUBMITTED;
1462                 break;
1463
1464         default:
1465                 /*
1466                  * Must always call bio_io_error on failure.
1467                  * dm_thin_find_block can fail with -EINVAL if the
1468                  * pool is switched to fail-io mode.
1469                  */
1470                 bio_io_error(bio);
1471                 r = DM_MAPIO_SUBMITTED;
1472                 break;
1473         }
1474
1475         return r;
1476 }
1477
1478 static int pool_is_congested(struct dm_target_callbacks *cb, int bdi_bits)
1479 {
1480         int r;
1481         unsigned long flags;
1482         struct pool_c *pt = container_of(cb, struct pool_c, callbacks);
1483
1484         spin_lock_irqsave(&pt->pool->lock, flags);
1485         r = !bio_list_empty(&pt->pool->retry_on_resume_list);
1486         spin_unlock_irqrestore(&pt->pool->lock, flags);
1487
1488         if (!r) {
1489                 struct request_queue *q = bdev_get_queue(pt->data_dev->bdev);
1490                 r = bdi_congested(&q->backing_dev_info, bdi_bits);
1491         }
1492
1493         return r;
1494 }
1495
1496 static void __requeue_bios(struct pool *pool)
1497 {
1498         bio_list_merge(&pool->deferred_bios, &pool->retry_on_resume_list);
1499         bio_list_init(&pool->retry_on_resume_list);
1500 }
1501
1502 /*----------------------------------------------------------------
1503  * Binding of control targets to a pool object
1504  *--------------------------------------------------------------*/
1505 static bool data_dev_supports_discard(struct pool_c *pt)
1506 {
1507         struct request_queue *q = bdev_get_queue(pt->data_dev->bdev);
1508
1509         return q && blk_queue_discard(q);
1510 }
1511
1512 /*
1513  * If discard_passdown was enabled verify that the data device
1514  * supports discards.  Disable discard_passdown if not.
1515  */
1516 static void disable_passdown_if_not_supported(struct pool_c *pt)
1517 {
1518         struct pool *pool = pt->pool;
1519         struct block_device *data_bdev = pt->data_dev->bdev;
1520         struct queue_limits *data_limits = &bdev_get_queue(data_bdev)->limits;
1521         sector_t block_size = pool->sectors_per_block << SECTOR_SHIFT;
1522         const char *reason = NULL;
1523         char buf[BDEVNAME_SIZE];
1524
1525         if (!pt->adjusted_pf.discard_passdown)
1526                 return;
1527
1528         if (!data_dev_supports_discard(pt))
1529                 reason = "discard unsupported";
1530
1531         else if (data_limits->max_discard_sectors < pool->sectors_per_block)
1532                 reason = "max discard sectors smaller than a block";
1533
1534         else if (data_limits->discard_granularity > block_size)
1535                 reason = "discard granularity larger than a block";
1536
1537         else if (block_size & (data_limits->discard_granularity - 1))
1538                 reason = "discard granularity not a factor of block size";
1539
1540         if (reason) {
1541                 DMWARN("Data device (%s) %s: Disabling discard passdown.", bdevname(data_bdev, buf), reason);
1542                 pt->adjusted_pf.discard_passdown = false;
1543         }
1544 }
1545
1546 static int bind_control_target(struct pool *pool, struct dm_target *ti)
1547 {
1548         struct pool_c *pt = ti->private;
1549
1550         /*
1551          * We want to make sure that degraded pools are never upgraded.
1552          */
1553         enum pool_mode old_mode = pool->pf.mode;
1554         enum pool_mode new_mode = pt->adjusted_pf.mode;
1555
1556         if (old_mode > new_mode)
1557                 new_mode = old_mode;
1558
1559         pool->ti = ti;
1560         pool->low_water_blocks = pt->low_water_blocks;
1561         pool->pf = pt->adjusted_pf;
1562
1563         set_pool_mode(pool, new_mode);
1564
1565         return 0;
1566 }
1567
1568 static void unbind_control_target(struct pool *pool, struct dm_target *ti)
1569 {
1570         if (pool->ti == ti)
1571                 pool->ti = NULL;
1572 }
1573
1574 /*----------------------------------------------------------------
1575  * Pool creation
1576  *--------------------------------------------------------------*/
1577 /* Initialize pool features. */
1578 static void pool_features_init(struct pool_features *pf)
1579 {
1580         pf->mode = PM_WRITE;
1581         pf->zero_new_blocks = true;
1582         pf->discard_enabled = true;
1583         pf->discard_passdown = true;
1584 }
1585
1586 static void __pool_destroy(struct pool *pool)
1587 {
1588         __pool_table_remove(pool);
1589
1590         if (dm_pool_metadata_close(pool->pmd) < 0)
1591                 DMWARN("%s: dm_pool_metadata_close() failed.", __func__);
1592
1593         dm_bio_prison_destroy(pool->prison);
1594         dm_kcopyd_client_destroy(pool->copier);
1595
1596         if (pool->wq)
1597                 destroy_workqueue(pool->wq);
1598
1599         if (pool->next_mapping)
1600                 mempool_free(pool->next_mapping, pool->mapping_pool);
1601         mempool_destroy(pool->mapping_pool);
1602         mempool_destroy(pool->endio_hook_pool);
1603         dm_deferred_set_destroy(pool->shared_read_ds);
1604         dm_deferred_set_destroy(pool->all_io_ds);
1605         kfree(pool);
1606 }
1607
1608 static struct kmem_cache *_new_mapping_cache;
1609 static struct kmem_cache *_endio_hook_cache;
1610
1611 static struct pool *pool_create(struct mapped_device *pool_md,
1612                                 struct block_device *metadata_dev,
1613                                 unsigned long block_size,
1614                                 int read_only, char **error)
1615 {
1616         int r;
1617         void *err_p;
1618         struct pool *pool;
1619         struct dm_pool_metadata *pmd;
1620         bool format_device = read_only ? false : true;
1621
1622         pmd = dm_pool_metadata_open(metadata_dev, block_size, format_device);
1623         if (IS_ERR(pmd)) {
1624                 *error = "Error creating metadata object";
1625                 return (struct pool *)pmd;
1626         }
1627
1628         pool = kmalloc(sizeof(*pool), GFP_KERNEL);
1629         if (!pool) {
1630                 *error = "Error allocating memory for pool";
1631                 err_p = ERR_PTR(-ENOMEM);
1632                 goto bad_pool;
1633         }
1634
1635         pool->pmd = pmd;
1636         pool->sectors_per_block = block_size;
1637         if (block_size & (block_size - 1))
1638                 pool->sectors_per_block_shift = -1;
1639         else
1640                 pool->sectors_per_block_shift = __ffs(block_size);
1641         pool->low_water_blocks = 0;
1642         pool_features_init(&pool->pf);
1643         pool->prison = dm_bio_prison_create(PRISON_CELLS);
1644         if (!pool->prison) {
1645                 *error = "Error creating pool's bio prison";
1646                 err_p = ERR_PTR(-ENOMEM);
1647                 goto bad_prison;
1648         }
1649
1650         pool->copier = dm_kcopyd_client_create();
1651         if (IS_ERR(pool->copier)) {
1652                 r = PTR_ERR(pool->copier);
1653                 *error = "Error creating pool's kcopyd client";
1654                 err_p = ERR_PTR(r);
1655                 goto bad_kcopyd_client;
1656         }
1657
1658         /*
1659          * Create singlethreaded workqueue that will service all devices
1660          * that use this metadata.
1661          */
1662         pool->wq = alloc_ordered_workqueue("dm-" DM_MSG_PREFIX, WQ_MEM_RECLAIM);
1663         if (!pool->wq) {
1664                 *error = "Error creating pool's workqueue";
1665                 err_p = ERR_PTR(-ENOMEM);
1666                 goto bad_wq;
1667         }
1668
1669         INIT_WORK(&pool->worker, do_worker);
1670         INIT_DELAYED_WORK(&pool->waker, do_waker);
1671         spin_lock_init(&pool->lock);
1672         bio_list_init(&pool->deferred_bios);
1673         bio_list_init(&pool->deferred_flush_bios);
1674         INIT_LIST_HEAD(&pool->prepared_mappings);
1675         INIT_LIST_HEAD(&pool->prepared_discards);
1676         pool->low_water_triggered = 0;
1677         pool->no_free_space = 0;
1678         bio_list_init(&pool->retry_on_resume_list);
1679
1680         pool->shared_read_ds = dm_deferred_set_create();
1681         if (!pool->shared_read_ds) {
1682                 *error = "Error creating pool's shared read deferred set";
1683                 err_p = ERR_PTR(-ENOMEM);
1684                 goto bad_shared_read_ds;
1685         }
1686
1687         pool->all_io_ds = dm_deferred_set_create();
1688         if (!pool->all_io_ds) {
1689                 *error = "Error creating pool's all io deferred set";
1690                 err_p = ERR_PTR(-ENOMEM);
1691                 goto bad_all_io_ds;
1692         }
1693
1694         pool->next_mapping = NULL;
1695         pool->mapping_pool = mempool_create_slab_pool(MAPPING_POOL_SIZE,
1696                                                       _new_mapping_cache);
1697         if (!pool->mapping_pool) {
1698                 *error = "Error creating pool's mapping mempool";
1699                 err_p = ERR_PTR(-ENOMEM);
1700                 goto bad_mapping_pool;
1701         }
1702
1703         pool->endio_hook_pool = mempool_create_slab_pool(ENDIO_HOOK_POOL_SIZE,
1704                                                          _endio_hook_cache);
1705         if (!pool->endio_hook_pool) {
1706                 *error = "Error creating pool's endio_hook mempool";
1707                 err_p = ERR_PTR(-ENOMEM);
1708                 goto bad_endio_hook_pool;
1709         }
1710         pool->ref_count = 1;
1711         pool->last_commit_jiffies = jiffies;
1712         pool->pool_md = pool_md;
1713         pool->md_dev = metadata_dev;
1714         __pool_table_insert(pool);
1715
1716         return pool;
1717
1718 bad_endio_hook_pool:
1719         mempool_destroy(pool->mapping_pool);
1720 bad_mapping_pool:
1721         dm_deferred_set_destroy(pool->all_io_ds);
1722 bad_all_io_ds:
1723         dm_deferred_set_destroy(pool->shared_read_ds);
1724 bad_shared_read_ds:
1725         destroy_workqueue(pool->wq);
1726 bad_wq:
1727         dm_kcopyd_client_destroy(pool->copier);
1728 bad_kcopyd_client:
1729         dm_bio_prison_destroy(pool->prison);
1730 bad_prison:
1731         kfree(pool);
1732 bad_pool:
1733         if (dm_pool_metadata_close(pmd))
1734                 DMWARN("%s: dm_pool_metadata_close() failed.", __func__);
1735
1736         return err_p;
1737 }
1738
1739 static void __pool_inc(struct pool *pool)
1740 {
1741         BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));
1742         pool->ref_count++;
1743 }
1744
1745 static void __pool_dec(struct pool *pool)
1746 {
1747         BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));
1748         BUG_ON(!pool->ref_count);
1749         if (!--pool->ref_count)
1750                 __pool_destroy(pool);
1751 }
1752
1753 static struct pool *__pool_find(struct mapped_device *pool_md,
1754                                 struct block_device *metadata_dev,
1755                                 unsigned long block_size, int read_only,
1756                                 char **error, int *created)
1757 {
1758         struct pool *pool = __pool_table_lookup_metadata_dev(metadata_dev);
1759
1760         if (pool) {
1761                 if (pool->pool_md != pool_md) {
1762                         *error = "metadata device already in use by a pool";
1763                         return ERR_PTR(-EBUSY);
1764                 }
1765                 __pool_inc(pool);
1766
1767         } else {
1768                 pool = __pool_table_lookup(pool_md);
1769                 if (pool) {
1770                         if (pool->md_dev != metadata_dev) {
1771                                 *error = "different pool cannot replace a pool";
1772                                 return ERR_PTR(-EINVAL);
1773                         }
1774                         __pool_inc(pool);
1775
1776                 } else {
1777                         pool = pool_create(pool_md, metadata_dev, block_size, read_only, error);
1778                         *created = 1;
1779                 }
1780         }
1781
1782         return pool;
1783 }
1784
1785 /*----------------------------------------------------------------
1786  * Pool target methods
1787  *--------------------------------------------------------------*/
1788 static void pool_dtr(struct dm_target *ti)
1789 {
1790         struct pool_c *pt = ti->private;
1791
1792         mutex_lock(&dm_thin_pool_table.mutex);
1793
1794         unbind_control_target(pt->pool, ti);
1795         __pool_dec(pt->pool);
1796         dm_put_device(ti, pt->metadata_dev);
1797         dm_put_device(ti, pt->data_dev);
1798         kfree(pt);
1799
1800         mutex_unlock(&dm_thin_pool_table.mutex);
1801 }
1802
1803 static int parse_pool_features(struct dm_arg_set *as, struct pool_features *pf,
1804                                struct dm_target *ti)
1805 {
1806         int r;
1807         unsigned argc;
1808         const char *arg_name;
1809
1810         static struct dm_arg _args[] = {
1811                 {0, 4, "Invalid number of pool feature arguments"},
1812         };
1813
1814         /*
1815          * No feature arguments supplied.
1816          */
1817         if (!as->argc)
1818                 return 0;
1819
1820         r = dm_read_arg_group(_args, as, &argc, &ti->error);
1821         if (r)
1822                 return -EINVAL;
1823
1824         while (argc && !r) {
1825                 arg_name = dm_shift_arg(as);
1826                 argc--;
1827
1828                 if (!strcasecmp(arg_name, "skip_block_zeroing"))
1829                         pf->zero_new_blocks = false;
1830
1831                 else if (!strcasecmp(arg_name, "ignore_discard"))
1832                         pf->discard_enabled = false;
1833
1834                 else if (!strcasecmp(arg_name, "no_discard_passdown"))
1835                         pf->discard_passdown = false;
1836
1837                 else if (!strcasecmp(arg_name, "read_only"))
1838                         pf->mode = PM_READ_ONLY;
1839
1840                 else {
1841                         ti->error = "Unrecognised pool feature requested";
1842                         r = -EINVAL;
1843                         break;
1844                 }
1845         }
1846
1847         return r;
1848 }
1849
1850 /*
1851  * thin-pool <metadata dev> <data dev>
1852  *           <data block size (sectors)>
1853  *           <low water mark (blocks)>
1854  *           [<#feature args> [<arg>]*]
1855  *
1856  * Optional feature arguments are:
1857  *           skip_block_zeroing: skips the zeroing of newly-provisioned blocks.
1858  *           ignore_discard: disable discard
1859  *           no_discard_passdown: don't pass discards down to the data device
 *           read_only: don't allow any changes to be made to the pool metadata
1860  */
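/*
 * Illustrative table line (device names and sizes are hypothetical):
 *
 *   0 2097152 thin-pool /dev/sdb /dev/sdc 128 32768 1 skip_block_zeroing
 *
 * i.e. a pool built from /dev/sdb (metadata) and /dev/sdc (data), using
 * 128-sector (64KiB) data blocks, raising an event once fewer than 32768
 * free blocks remain, with block zeroing disabled.
 */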
1861 static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv)
1862 {
1863         int r, pool_created = 0;
1864         struct pool_c *pt;
1865         struct pool *pool;
1866         struct pool_features pf;
1867         struct dm_arg_set as;
1868         struct dm_dev *data_dev;
1869         unsigned long block_size;
1870         dm_block_t low_water_blocks;
1871         struct dm_dev *metadata_dev;
1872         sector_t metadata_dev_size;
1873         char b[BDEVNAME_SIZE];
1874
1875         /*
1876          * FIXME Remove validation from scope of lock.
1877          */
1878         mutex_lock(&dm_thin_pool_table.mutex);
1879
1880         if (argc < 4) {
1881                 ti->error = "Invalid argument count";
1882                 r = -EINVAL;
1883                 goto out_unlock;
1884         }
1885         as.argc = argc;
1886         as.argv = argv;
1887
1888         r = dm_get_device(ti, argv[0], FMODE_READ | FMODE_WRITE, &metadata_dev);
1889         if (r) {
1890                 ti->error = "Error opening metadata block device";
1891                 goto out_unlock;
1892         }
1893
1894         metadata_dev_size = i_size_read(metadata_dev->bdev->bd_inode) >> SECTOR_SHIFT;
1895         if (metadata_dev_size > THIN_METADATA_MAX_SECTORS_WARNING)
1896                 DMWARN("Metadata device %s is larger than %u sectors: excess space will not be used.",
1897                        bdevname(metadata_dev->bdev, b), THIN_METADATA_MAX_SECTORS);
1898
1899         r = dm_get_device(ti, argv[1], FMODE_READ | FMODE_WRITE, &data_dev);
1900         if (r) {
1901                 ti->error = "Error getting data device";
1902                 goto out_metadata;
1903         }
1904
1905         if (kstrtoul(argv[2], 10, &block_size) || !block_size ||
1906             block_size < DATA_DEV_BLOCK_SIZE_MIN_SECTORS ||
1907             block_size > DATA_DEV_BLOCK_SIZE_MAX_SECTORS ||
1908             block_size & (DATA_DEV_BLOCK_SIZE_MIN_SECTORS - 1)) {
1909                 ti->error = "Invalid block size";
1910                 r = -EINVAL;
1911                 goto out;
1912         }
1913
1914         if (kstrtoull(argv[3], 10, (unsigned long long *)&low_water_blocks)) {
1915                 ti->error = "Invalid low water mark";
1916                 r = -EINVAL;
1917                 goto out;
1918         }
1919
1920         /*
1921          * Set default pool features.
1922          */
1923         pool_features_init(&pf);
1924
1925         dm_consume_args(&as, 4);
1926         r = parse_pool_features(&as, &pf, ti);
1927         if (r)
1928                 goto out;
1929
1930         pt = kzalloc(sizeof(*pt), GFP_KERNEL);
1931         if (!pt) {
1932                 r = -ENOMEM;
1933                 goto out;
1934         }
1935
1936         pool = __pool_find(dm_table_get_md(ti->table), metadata_dev->bdev,
1937                            block_size, pf.mode == PM_READ_ONLY, &ti->error, &pool_created);
1938         if (IS_ERR(pool)) {
1939                 r = PTR_ERR(pool);
1940                 goto out_free_pt;
1941         }
1942
1943         /*
1944          * 'pool_created' reflects whether this is the first table load.
1945          * Top level discard support is not allowed to be changed after
1946          * initial load.  This would require a pool reload to trigger thin
1947          * device changes.
1948          */
1949         if (!pool_created && pf.discard_enabled != pool->pf.discard_enabled) {
1950                 ti->error = "Discard support cannot be disabled once enabled";
1951                 r = -EINVAL;
1952                 goto out_flags_changed;
1953         }
1954
1955         pt->pool = pool;
1956         pt->ti = ti;
1957         pt->metadata_dev = metadata_dev;
1958         pt->data_dev = data_dev;
1959         pt->low_water_blocks = low_water_blocks;
1960         pt->adjusted_pf = pt->requested_pf = pf;
1961         ti->num_flush_requests = 1;
1962
1963         /*
1964          * Only need to enable discards if the pool should pass
1965          * them down to the data device.  The thin device's discard
1966          * processing will cause mappings to be removed from the btree.
1967          */
1968         if (pf.discard_enabled && pf.discard_passdown) {
1969                 ti->num_discard_requests = 1;
1970
1971                 /*
1972                  * Setting 'discards_supported' circumvents the normal
1973                  * stacking of discard limits (this keeps the pool and
1974                  * thin devices' discard limits consistent).
1975                  */
1976                 ti->discards_supported = true;
1977                 ti->discard_zeroes_data_unsupported = true;
1978         }
1979         ti->private = pt;
1980
1981         pt->callbacks.congested_fn = pool_is_congested;
1982         dm_table_add_target_callbacks(ti->table, &pt->callbacks);
1983
1984         mutex_unlock(&dm_thin_pool_table.mutex);
1985
1986         return 0;
1987
1988 out_flags_changed:
1989         __pool_dec(pool);
1990 out_free_pt:
1991         kfree(pt);
1992 out:
1993         dm_put_device(ti, data_dev);
1994 out_metadata:
1995         dm_put_device(ti, metadata_dev);
1996 out_unlock:
1997         mutex_unlock(&dm_thin_pool_table.mutex);
1998
1999         return r;
2000 }
2001
2002 static int pool_map(struct dm_target *ti, struct bio *bio,
2003                     union map_info *map_context)
2004 {
2005         int r;
2006         struct pool_c *pt = ti->private;
2007         struct pool *pool = pt->pool;
2008         unsigned long flags;
2009
2010         /*
2011          * As this is a singleton target, ti->begin is always zero.
2012          */
2013         spin_lock_irqsave(&pool->lock, flags);
2014         bio->bi_bdev = pt->data_dev->bdev;
2015         r = DM_MAPIO_REMAPPED;
2016         spin_unlock_irqrestore(&pool->lock, flags);
2017
2018         return r;
2019 }
2020
2021 /*
2022  * Retrieves the number of blocks of the data device from
2023  * the superblock and compares it to the actual device size,
2024  * thus resizing the data device in case it has grown.
2025  *
2026  * This copes both with the ctr opening a preallocated data device
2027  * that is then resumed,
2028  * -and-
2029  * with the resume method being called on its own after userspace
2030  * has grown the data device in reaction to a table event.
2031  */
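/*
 * Illustrative userspace sequence for growing the data device
 * (device and table details are hypothetical):
 *
 *   dmsetup suspend pool
 *   dmsetup reload pool --table "0 <new length in sectors> thin-pool ..."
 *   dmsetup resume pool
 *
 * The resume ends up here and the pool metadata is grown to match.
 */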
2032 static int pool_preresume(struct dm_target *ti)
2033 {
2034         int r;
2035         struct pool_c *pt = ti->private;
2036         struct pool *pool = pt->pool;
2037         sector_t data_size = ti->len;
2038         dm_block_t sb_data_size;
2039
2040         /*
2041          * Take control of the pool object.
2042          */
2043         r = bind_control_target(pool, ti);
2044         if (r)
2045                 return r;
2046
2047         (void) sector_div(data_size, pool->sectors_per_block);
2048
2049         r = dm_pool_get_data_dev_size(pool->pmd, &sb_data_size);
2050         if (r) {
2051                 DMERR("failed to retrieve data device size");
2052                 return r;
2053         }
2054
2055         if (data_size < sb_data_size) {
2056                 DMERR("pool target too small, is %llu blocks (expected %llu)",
2057                       (unsigned long long)data_size, sb_data_size);
2058                 return -EINVAL;
2059
2060         } else if (data_size > sb_data_size) {
2061                 r = dm_pool_resize_data_dev(pool->pmd, data_size);
2062                 if (r) {
2063                         DMERR("failed to resize data device");
2064                         /* FIXME Stricter than necessary: Rollback transaction instead here */
2065                         set_pool_mode(pool, PM_READ_ONLY);
2066                         return r;
2067                 }
2068
2069                 (void) commit_or_fallback(pool);
2070         }
2071
2072         return 0;
2073 }
2074
2075 static void pool_resume(struct dm_target *ti)
2076 {
2077         struct pool_c *pt = ti->private;
2078         struct pool *pool = pt->pool;
2079         unsigned long flags;
2080
2081         spin_lock_irqsave(&pool->lock, flags);
2082         pool->low_water_triggered = 0;
2083         pool->no_free_space = 0;
2084         __requeue_bios(pool);
2085         spin_unlock_irqrestore(&pool->lock, flags);
2086
2087         do_waker(&pool->waker.work);
2088 }
2089
2090 static void pool_postsuspend(struct dm_target *ti)
2091 {
2092         struct pool_c *pt = ti->private;
2093         struct pool *pool = pt->pool;
2094
2095         cancel_delayed_work(&pool->waker);
2096         flush_workqueue(pool->wq);
2097         (void) commit_or_fallback(pool);
2098 }
2099
2100 static int check_arg_count(unsigned argc, unsigned args_required)
2101 {
2102         if (argc != args_required) {
2103                 DMWARN("Message received with %u arguments instead of %u.",
2104                        argc, args_required);
2105                 return -EINVAL;
2106         }
2107
2108         return 0;
2109 }
2110
2111 static int read_dev_id(char *arg, dm_thin_id *dev_id, int warning)
2112 {
2113         if (!kstrtoull(arg, 10, (unsigned long long *)dev_id) &&
2114             *dev_id <= MAX_DEV_ID)
2115                 return 0;
2116
2117         if (warning)
2118                 DMWARN("Message received with invalid device id: %s", arg);
2119
2120         return -EINVAL;
2121 }
2122
2123 static int process_create_thin_mesg(unsigned argc, char **argv, struct pool *pool)
2124 {
2125         dm_thin_id dev_id;
2126         int r;
2127
2128         r = check_arg_count(argc, 2);
2129         if (r)
2130                 return r;
2131
2132         r = read_dev_id(argv[1], &dev_id, 1);
2133         if (r)
2134                 return r;
2135
2136         r = dm_pool_create_thin(pool->pmd, dev_id);
2137         if (r) {
2138                 DMWARN("Creation of new thinly-provisioned device with id %s failed.",
2139                        argv[1]);
2140                 return r;
2141         }
2142
2143         return 0;
2144 }
2145
2146 static int process_create_snap_mesg(unsigned argc, char **argv, struct pool *pool)
2147 {
2148         dm_thin_id dev_id;
2149         dm_thin_id origin_dev_id;
2150         int r;
2151
2152         r = check_arg_count(argc, 3);
2153         if (r)
2154                 return r;
2155
2156         r = read_dev_id(argv[1], &dev_id, 1);
2157         if (r)
2158                 return r;
2159
2160         r = read_dev_id(argv[2], &origin_dev_id, 1);
2161         if (r)
2162                 return r;
2163
2164         r = dm_pool_create_snap(pool->pmd, dev_id, origin_dev_id);
2165         if (r) {
2166                 DMWARN("Creation of new snapshot %s of device %s failed.",
2167                        argv[1], argv[2]);
2168                 return r;
2169         }
2170
2171         return 0;
2172 }
2173
2174 static int process_delete_mesg(unsigned argc, char **argv, struct pool *pool)
2175 {
2176         dm_thin_id dev_id;
2177         int r;
2178
2179         r = check_arg_count(argc, 2);
2180         if (r)
2181                 return r;
2182
2183         r = read_dev_id(argv[1], &dev_id, 1);
2184         if (r)
2185                 return r;
2186
2187         r = dm_pool_delete_thin_device(pool->pmd, dev_id);
2188         if (r)
2189                 DMWARN("Deletion of thin device %s failed.", argv[1]);
2190
2191         return r;
2192 }
2193
2194 static int process_set_transaction_id_mesg(unsigned argc, char **argv, struct pool *pool)
2195 {
2196         dm_thin_id old_id, new_id;
2197         int r;
2198
2199         r = check_arg_count(argc, 3);
2200         if (r)
2201                 return r;
2202
2203         if (kstrtoull(argv[1], 10, (unsigned long long *)&old_id)) {
2204                 DMWARN("set_transaction_id message: Unrecognised id %s.", argv[1]);
2205                 return -EINVAL;
2206         }
2207
2208         if (kstrtoull(argv[2], 10, (unsigned long long *)&new_id)) {
2209                 DMWARN("set_transaction_id message: Unrecognised new id %s.", argv[2]);
2210                 return -EINVAL;
2211         }
2212
2213         r = dm_pool_set_metadata_transaction_id(pool->pmd, old_id, new_id);
2214         if (r) {
2215                 DMWARN("Failed to change transaction id from %s to %s.",
2216                        argv[1], argv[2]);
2217                 return r;
2218         }
2219
2220         return 0;
2221 }
2222
2223 static int process_reserve_metadata_snap_mesg(unsigned argc, char **argv, struct pool *pool)
2224 {
2225         int r;
2226
2227         r = check_arg_count(argc, 1);
2228         if (r)
2229                 return r;
2230
2231         (void) commit_or_fallback(pool);
2232
2233         r = dm_pool_reserve_metadata_snap(pool->pmd);
2234         if (r)
2235                 DMWARN("reserve_metadata_snap message failed.");
2236
2237         return r;
2238 }
2239
2240 static int process_release_metadata_snap_mesg(unsigned argc, char **argv, struct pool *pool)
2241 {
2242         int r;
2243
2244         r = check_arg_count(argc, 1);
2245         if (r)
2246                 return r;
2247
2248         r = dm_pool_release_metadata_snap(pool->pmd);
2249         if (r)
2250                 DMWARN("release_metadata_snap message failed.");
2251
2252         return r;
2253 }
2254
2255 /*
2256  * Messages supported:
2257  *   create_thin        <dev_id>
2258  *   create_snap        <dev_id> <origin_id>
2259  *   delete             <dev_id>
2261  *   set_transaction_id <current_trans_id> <new_trans_id>
2262  *   reserve_metadata_snap
2263  *   release_metadata_snap
2264  */
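/*
 * For example (hypothetical pool device name):
 *
 *   dmsetup message /dev/mapper/pool 0 create_thin 0
 *   dmsetup message /dev/mapper/pool 0 create_snap 1 0
 *
 * A successful message is followed by a metadata commit (see below).
 */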
2265 static int pool_message(struct dm_target *ti, unsigned argc, char **argv)
2266 {
2267         int r = -EINVAL;
2268         struct pool_c *pt = ti->private;
2269         struct pool *pool = pt->pool;
2270
2271         if (!strcasecmp(argv[0], "create_thin"))
2272                 r = process_create_thin_mesg(argc, argv, pool);
2273
2274         else if (!strcasecmp(argv[0], "create_snap"))
2275                 r = process_create_snap_mesg(argc, argv, pool);
2276
2277         else if (!strcasecmp(argv[0], "delete"))
2278                 r = process_delete_mesg(argc, argv, pool);
2279
2280         else if (!strcasecmp(argv[0], "set_transaction_id"))
2281                 r = process_set_transaction_id_mesg(argc, argv, pool);
2282
2283         else if (!strcasecmp(argv[0], "reserve_metadata_snap"))
2284                 r = process_reserve_metadata_snap_mesg(argc, argv, pool);
2285
2286         else if (!strcasecmp(argv[0], "release_metadata_snap"))
2287                 r = process_release_metadata_snap_mesg(argc, argv, pool);
2288
2289         else
2290                 DMWARN("Unrecognised thin pool target message received: %s", argv[0]);
2291
2292         if (!r)
2293                 (void) commit_or_fallback(pool);
2294
2295         return r;
2296 }
2297
2298 static void emit_flags(struct pool_features *pf, char *result,
2299                        unsigned sz, unsigned maxlen)
2300 {
2301         unsigned count = !pf->zero_new_blocks + !pf->discard_enabled +
2302                 !pf->discard_passdown + (pf->mode == PM_READ_ONLY);
2303         DMEMIT("%u ", count);
2304
2305         if (!pf->zero_new_blocks)
2306                 DMEMIT("skip_block_zeroing ");
2307
2308         if (!pf->discard_enabled)
2309                 DMEMIT("ignore_discard ");
2310
2311         if (!pf->discard_passdown)
2312                 DMEMIT("no_discard_passdown ");
2313
2314         if (pf->mode == PM_READ_ONLY)
2315                 DMEMIT("read_only ");
2316 }
2317
2318 /*
2319  * Status line is:
2320  *    <transaction id> <used metadata blocks>/<total metadata blocks>
2321  *    <used data blocks>/<total data blocks> <held metadata root>
2322  */
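/*
 * For example (illustrative values):
 *
 *   0 141/48152 3201/1048576 - rw discard_passdown
 *
 * The fields after the held metadata root report the pool mode and the
 * discard configuration.
 */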
2323 static int pool_status(struct dm_target *ti, status_type_t type,
2324                        unsigned status_flags, char *result, unsigned maxlen)
2325 {
2326         int r;
2327         unsigned sz = 0;
2328         uint64_t transaction_id;
2329         dm_block_t nr_free_blocks_data;
2330         dm_block_t nr_free_blocks_metadata;
2331         dm_block_t nr_blocks_data;
2332         dm_block_t nr_blocks_metadata;
2333         dm_block_t held_root;
2334         char buf[BDEVNAME_SIZE];
2335         char buf2[BDEVNAME_SIZE];
2336         struct pool_c *pt = ti->private;
2337         struct pool *pool = pt->pool;
2338
2339         switch (type) {
2340         case STATUSTYPE_INFO:
2341                 if (get_pool_mode(pool) == PM_FAIL) {
2342                         DMEMIT("Fail");
2343                         break;
2344                 }
2345
2346                 /* Commit to ensure statistics aren't out-of-date */
2347                 if (!(status_flags & DM_STATUS_NOFLUSH_FLAG) && !dm_suspended(ti))
2348                         (void) commit_or_fallback(pool);
2349
2350                 r = dm_pool_get_metadata_transaction_id(pool->pmd,
2351                                                         &transaction_id);
2352                 if (r)
2353                         return r;
2354
2355                 r = dm_pool_get_free_metadata_block_count(pool->pmd,
2356                                                           &nr_free_blocks_metadata);
2357                 if (r)
2358                         return r;
2359
2360                 r = dm_pool_get_metadata_dev_size(pool->pmd, &nr_blocks_metadata);
2361                 if (r)
2362                         return r;
2363
2364                 r = dm_pool_get_free_block_count(pool->pmd,
2365                                                  &nr_free_blocks_data);
2366                 if (r)
2367                         return r;
2368
2369                 r = dm_pool_get_data_dev_size(pool->pmd, &nr_blocks_data);
2370                 if (r)
2371                         return r;
2372
2373                 r = dm_pool_get_metadata_snap(pool->pmd, &held_root);
2374                 if (r)
2375                         return r;
2376
2377                 DMEMIT("%llu %llu/%llu %llu/%llu ",
2378                        (unsigned long long)transaction_id,
2379                        (unsigned long long)(nr_blocks_metadata - nr_free_blocks_metadata),
2380                        (unsigned long long)nr_blocks_metadata,
2381                        (unsigned long long)(nr_blocks_data - nr_free_blocks_data),
2382                        (unsigned long long)nr_blocks_data);
2383
2384                 if (held_root)
2385                         DMEMIT("%llu ", held_root);
2386                 else
2387                         DMEMIT("- ");
2388
2389                 if (pool->pf.mode == PM_READ_ONLY)
2390                         DMEMIT("ro ");
2391                 else
2392                         DMEMIT("rw ");
2393
2394                 if (!pool->pf.discard_enabled)
2395                         DMEMIT("ignore_discard");
2396                 else if (pool->pf.discard_passdown)
2397                         DMEMIT("discard_passdown");
2398                 else
2399                         DMEMIT("no_discard_passdown");
2400
2401                 break;
2402
2403         case STATUSTYPE_TABLE:
2404                 DMEMIT("%s %s %lu %llu ",
2405                        format_dev_t(buf, pt->metadata_dev->bdev->bd_dev),
2406                        format_dev_t(buf2, pt->data_dev->bdev->bd_dev),
2407                        (unsigned long)pool->sectors_per_block,
2408                        (unsigned long long)pt->low_water_blocks);
2409                 emit_flags(&pt->requested_pf, result, sz, maxlen);
2410                 break;
2411         }
2412
2413         return 0;
2414 }
2415
2416 static int pool_iterate_devices(struct dm_target *ti,
2417                                 iterate_devices_callout_fn fn, void *data)
2418 {
2419         struct pool_c *pt = ti->private;
2420
2421         return fn(ti, pt->data_dev, 0, ti->len, data);
2422 }
2423
2424 static int pool_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
2425                       struct bio_vec *biovec, int max_size)
2426 {
2427         struct pool_c *pt = ti->private;
2428         struct request_queue *q = bdev_get_queue(pt->data_dev->bdev);
2429
2430         if (!q->merge_bvec_fn)
2431                 return max_size;
2432
2433         bvm->bi_bdev = pt->data_dev->bdev;
2434
2435         return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
2436 }
2437
2438 static bool block_size_is_power_of_two(struct pool *pool)
2439 {
2440         return pool->sectors_per_block_shift >= 0;
2441 }
2442
2443 static void set_discard_limits(struct pool_c *pt, struct queue_limits *limits)
2444 {
2445         struct pool *pool = pt->pool;
2446         struct queue_limits *data_limits;
2447
2448         limits->max_discard_sectors = pool->sectors_per_block;
2449
2450         /*
2451          * discard_granularity is just a hint, and not enforced.
2452          */
2453         if (pt->adjusted_pf.discard_passdown) {
2454                 data_limits = &bdev_get_queue(pt->data_dev->bdev)->limits;
2455                 limits->discard_granularity = data_limits->discard_granularity;
2456         } else if (block_size_is_power_of_two(pool))
2457                 limits->discard_granularity = pool->sectors_per_block << SECTOR_SHIFT;
2458         else
2459                 /*
2460                  * Use largest power of 2 that is a factor of sectors_per_block
2461                  * but at least DATA_DEV_BLOCK_SIZE_MIN_SECTORS.
2462                  */
2463                 limits->discard_granularity = max(1 << (ffs(pool->sectors_per_block) - 1),
2464                                                   DATA_DEV_BLOCK_SIZE_MIN_SECTORS) << SECTOR_SHIFT;
2465 }
2466
2467 static void pool_io_hints(struct dm_target *ti, struct queue_limits *limits)
2468 {
2469         struct pool_c *pt = ti->private;
2470         struct pool *pool = pt->pool;
2471
2472         blk_limits_io_min(limits, 0);
2473         blk_limits_io_opt(limits, pool->sectors_per_block << SECTOR_SHIFT);
2474
2475         /*
2476          * pt->adjusted_pf is a staging area for the actual features to use.
2477          * They get transferred to the live pool in bind_control_target()
2478          * called from pool_preresume().
2479          */
2480         if (!pt->adjusted_pf.discard_enabled)
2481                 return;
2482
2483         disable_passdown_if_not_supported(pt);
2484
2485         set_discard_limits(pt, limits);
2486 }
2487
2488 static struct target_type pool_target = {
2489         .name = "thin-pool",
2490         .features = DM_TARGET_SINGLETON | DM_TARGET_ALWAYS_WRITEABLE |
2491                     DM_TARGET_IMMUTABLE,
2492         .version = {1, 6, 0},
2493         .module = THIS_MODULE,
2494         .ctr = pool_ctr,
2495         .dtr = pool_dtr,
2496         .map = pool_map,
2497         .postsuspend = pool_postsuspend,
2498         .preresume = pool_preresume,
2499         .resume = pool_resume,
2500         .message = pool_message,
2501         .status = pool_status,
2502         .merge = pool_merge,
2503         .iterate_devices = pool_iterate_devices,
2504         .io_hints = pool_io_hints,
2505 };
2506
2507 /*----------------------------------------------------------------
2508  * Thin target methods
2509  *--------------------------------------------------------------*/
2510 static void thin_dtr(struct dm_target *ti)
2511 {
2512         struct thin_c *tc = ti->private;
2513
2514         mutex_lock(&dm_thin_pool_table.mutex);
2515
2516         __pool_dec(tc->pool);
2517         dm_pool_close_thin_device(tc->td);
2518         dm_put_device(ti, tc->pool_dev);
2519         if (tc->origin_dev)
2520                 dm_put_device(ti, tc->origin_dev);
2521         kfree(tc);
2522
2523         mutex_unlock(&dm_thin_pool_table.mutex);
2524 }
2525
2526 /*
2527  * Thin target parameters:
2528  *
2529  * <pool_dev> <dev_id> [origin_dev]
2530  *
2531  * pool_dev: the path to the pool (e.g. /dev/mapper/my_pool)
2532  * dev_id: the internal device identifier
2533  * origin_dev: a device external to the pool that should act as the origin
2534  *
2535  * If the pool device has discards disabled, they get disabled for the thin
2536  * device as well.
2537  */
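/*
 * Illustrative table line (names and sizes are hypothetical):
 *
 *   0 2097152 thin /dev/mapper/pool 0
 *
 * i.e. a 1GiB (2097152-sector) thin device backed by internal device
 * id 0 of the given pool.
 */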
2538 static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv)
2539 {
2540         int r;
2541         struct thin_c *tc;
2542         struct dm_dev *pool_dev, *origin_dev;
2543         struct mapped_device *pool_md;
2544
2545         mutex_lock(&dm_thin_pool_table.mutex);
2546
2547         if (argc != 2 && argc != 3) {
2548                 ti->error = "Invalid argument count";
2549                 r = -EINVAL;
2550                 goto out_unlock;
2551         }
2552
2553         tc = ti->private = kzalloc(sizeof(*tc), GFP_KERNEL);
2554         if (!tc) {
2555                 ti->error = "Out of memory";
2556                 r = -ENOMEM;
2557                 goto out_unlock;
2558         }
2559
2560         if (argc == 3) {
2561                 r = dm_get_device(ti, argv[2], FMODE_READ, &origin_dev);
2562                 if (r) {
2563                         ti->error = "Error opening origin device";
2564                         goto bad_origin_dev;
2565                 }
2566                 tc->origin_dev = origin_dev;
2567         }
2568
2569         r = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &pool_dev);
2570         if (r) {
2571                 ti->error = "Error opening pool device";
2572                 goto bad_pool_dev;
2573         }
2574         tc->pool_dev = pool_dev;
2575
2576         if (read_dev_id(argv[1], (unsigned long long *)&tc->dev_id, 0)) {
2577                 ti->error = "Invalid device id";
2578                 r = -EINVAL;
2579                 goto bad_common;
2580         }
2581
2582         pool_md = dm_get_md(tc->pool_dev->bdev->bd_dev);
2583         if (!pool_md) {
2584                 ti->error = "Couldn't get pool mapped device";
2585                 r = -EINVAL;
2586                 goto bad_common;
2587         }
2588
2589         tc->pool = __pool_table_lookup(pool_md);
2590         if (!tc->pool) {
2591                 ti->error = "Couldn't find pool object";
2592                 r = -EINVAL;
2593                 goto bad_pool_lookup;
2594         }
2595         __pool_inc(tc->pool);
2596
2597         if (get_pool_mode(tc->pool) == PM_FAIL) {
2598                 ti->error = "Couldn't open thin device, Pool is in fail mode";
                r = -EINVAL;
2599                 goto bad_thin_open;
2600         }
2601
2602         r = dm_pool_open_thin_device(tc->pool->pmd, tc->dev_id, &tc->td);
2603         if (r) {
2604                 ti->error = "Couldn't open thin internal device";
2605                 goto bad_thin_open;
2606         }
2607
2608         r = dm_set_target_max_io_len(ti, tc->pool->sectors_per_block);
2609         if (r)
2610                 goto bad_thin_open;
2611
2612         ti->num_flush_requests = 1;
2613         ti->flush_supported = true;
2614
2615         /* In case the pool supports discards, pass them on. */
2616         if (tc->pool->pf.discard_enabled) {
2617                 ti->discards_supported = true;
2618                 ti->num_discard_requests = 1;
2619                 ti->discard_zeroes_data_unsupported = true;
2620                 /* Discard requests must be split on a block boundary */
2621                 ti->split_discard_requests = true;
2622         }
2623
2624         dm_put(pool_md);
2625
2626         mutex_unlock(&dm_thin_pool_table.mutex);
2627
2628         return 0;
2629
2630 bad_thin_open:
2631         __pool_dec(tc->pool);
2632 bad_pool_lookup:
2633         dm_put(pool_md);
2634 bad_common:
2635         dm_put_device(ti, tc->pool_dev);
2636 bad_pool_dev:
2637         if (tc->origin_dev)
2638                 dm_put_device(ti, tc->origin_dev);
2639 bad_origin_dev:
2640         kfree(tc);
2641 out_unlock:
2642         mutex_unlock(&dm_thin_pool_table.mutex);
2643
2644         return r;
2645 }
2646
2647 static int thin_map(struct dm_target *ti, struct bio *bio,
2648                     union map_info *map_context)
2649 {
2650         bio->bi_sector = dm_target_offset(ti, bio->bi_sector);
2651
2652         return thin_bio_map(ti, bio, map_context);
2653 }
2654
2655 static int thin_endio(struct dm_target *ti,
2656                       struct bio *bio, int err,
2657                       union map_info *map_context)
2658 {
2659         unsigned long flags;
2660         struct dm_thin_endio_hook *h = map_context->ptr;
2661         struct list_head work;
2662         struct dm_thin_new_mapping *m, *tmp;
2663         struct pool *pool = h->tc->pool;
2664
2665         if (h->shared_read_entry) {
2666                 INIT_LIST_HEAD(&work);
2667                 dm_deferred_entry_dec(h->shared_read_entry, &work);
2668
2669                 spin_lock_irqsave(&pool->lock, flags);
2670                 list_for_each_entry_safe(m, tmp, &work, list) {
2671                         list_del(&m->list);
2672                         m->quiesced = 1;
2673                         __maybe_add_mapping(m);
2674                 }
2675                 spin_unlock_irqrestore(&pool->lock, flags);
2676         }
2677
2678         if (h->all_io_entry) {
2679                 INIT_LIST_HEAD(&work);
2680                 dm_deferred_entry_dec(h->all_io_entry, &work);
2681                 if (!list_empty(&work)) {
2682                         spin_lock_irqsave(&pool->lock, flags);
2683                         list_for_each_entry_safe(m, tmp, &work, list)
2684                                 list_add(&m->list, &pool->prepared_discards);
2685                         spin_unlock_irqrestore(&pool->lock, flags);
2686                         wake_worker(pool);
2687                 }
2688         }
2689
2690         mempool_free(h, pool->endio_hook_pool);
2691
2692         return 0;
2693 }
2694
2695 static void thin_postsuspend(struct dm_target *ti)
2696 {
2697         if (dm_noflush_suspending(ti))
2698                 requeue_io((struct thin_c *)ti->private);
2699 }
2700
2701 /*
2702  * <nr mapped sectors> <highest mapped sector>
2703  */
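/*
 * For example (illustrative values): "1024000 2047999" for a device
 * with 1024000 sectors mapped and sector 2047999 as its highest
 * mapped sector.
 */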
2704 static int thin_status(struct dm_target *ti, status_type_t type,
2705                        unsigned status_flags, char *result, unsigned maxlen)
2706 {
2707         int r;
2708         ssize_t sz = 0;
2709         dm_block_t mapped, highest;
2710         char buf[BDEVNAME_SIZE];
2711         struct thin_c *tc = ti->private;
2712
2713         if (get_pool_mode(tc->pool) == PM_FAIL) {
2714                 DMEMIT("Fail");
2715                 return 0;
2716         }
2717
2718         if (!tc->td)
2719                 DMEMIT("-");
2720         else {
2721                 switch (type) {
2722                 case STATUSTYPE_INFO:
2723                         r = dm_thin_get_mapped_count(tc->td, &mapped);
2724                         if (r)
2725                                 return r;
2726
2727                         r = dm_thin_get_highest_mapped_block(tc->td, &highest);
2728                         if (r < 0)
2729                                 return r;
2730
2731                         DMEMIT("%llu ", mapped * tc->pool->sectors_per_block);
2732                         if (r)
2733                                 DMEMIT("%llu", ((highest + 1) *
2734                                                 tc->pool->sectors_per_block) - 1);
2735                         else
2736                                 DMEMIT("-");
2737                         break;
2738
2739                 case STATUSTYPE_TABLE:
2740                         DMEMIT("%s %lu",
2741                                format_dev_t(buf, tc->pool_dev->bdev->bd_dev),
2742                                (unsigned long) tc->dev_id);
2743                         if (tc->origin_dev)
2744                                 DMEMIT(" %s", format_dev_t(buf, tc->origin_dev->bdev->bd_dev));
2745                         break;
2746                 }
2747         }
2748
2749         return 0;
2750 }
2751
2752 static int thin_iterate_devices(struct dm_target *ti,
2753                                 iterate_devices_callout_fn fn, void *data)
2754 {
2755         sector_t blocks;
2756         struct thin_c *tc = ti->private;
2757         struct pool *pool = tc->pool;
2758
2759         /*
2760          * We can't call dm_pool_get_data_dev_size() since that blocks.  So
2761          * we follow a more convoluted path through to the pool's target.
2762          */
2763         if (!pool->ti)
2764                 return 0;       /* nothing is bound */
2765
2766         blocks = pool->ti->len;
2767         (void) sector_div(blocks, pool->sectors_per_block);
2768         if (blocks)
2769                 return fn(ti, tc->pool_dev, 0, pool->sectors_per_block * blocks, data);
2770
2771         return 0;
2772 }
2773
2774 /*
2775  * A thin device always inherits its queue limits from its pool.
2776  */
2777 static void thin_io_hints(struct dm_target *ti, struct queue_limits *limits)
2778 {
2779         struct thin_c *tc = ti->private;
2780
2781         *limits = bdev_get_queue(tc->pool_dev->bdev)->limits;
2782 }
2783
2784 static struct target_type thin_target = {
2785         .name = "thin",
2786         .version = {1, 5, 0},
2787         .module = THIS_MODULE,
2788         .ctr = thin_ctr,
2789         .dtr = thin_dtr,
2790         .map = thin_map,
2791         .end_io = thin_endio,
2792         .postsuspend = thin_postsuspend,
2793         .status = thin_status,
2794         .iterate_devices = thin_iterate_devices,
2795         .io_hints = thin_io_hints,
2796 };
2797
2798 /*----------------------------------------------------------------*/
2799
2800 static int __init dm_thin_init(void)
2801 {
2802         int r;
2803
2804         pool_table_init();
2805
2806         r = dm_register_target(&thin_target);
2807         if (r)
2808                 return r;
2809
2810         r = dm_register_target(&pool_target);
2811         if (r)
2812                 goto bad_pool_target;
2813
2814         r = -ENOMEM;
2815
2816         _new_mapping_cache = KMEM_CACHE(dm_thin_new_mapping, 0);
2817         if (!_new_mapping_cache)
2818                 goto bad_new_mapping_cache;
2819
2820         _endio_hook_cache = KMEM_CACHE(dm_thin_endio_hook, 0);
2821         if (!_endio_hook_cache)
2822                 goto bad_endio_hook_cache;
2823
2824         return 0;
2825
2826 bad_endio_hook_cache:
2827         kmem_cache_destroy(_new_mapping_cache);
2828 bad_new_mapping_cache:
2829         dm_unregister_target(&pool_target);
2830 bad_pool_target:
2831         dm_unregister_target(&thin_target);
2832
2833         return r;
2834 }
2835
2836 static void dm_thin_exit(void)
2837 {
2838         dm_unregister_target(&thin_target);
2839         dm_unregister_target(&pool_target);
2840
2841         kmem_cache_destroy(_new_mapping_cache);
2842         kmem_cache_destroy(_endio_hook_cache);
2843 }
2844
2845 module_init(dm_thin_init);
2846 module_exit(dm_thin_exit);
2847
2848 MODULE_DESCRIPTION(DM_NAME " thin provisioning target");
2849 MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
2850 MODULE_LICENSE("GPL");